LLVM 20.0.0git
RISCVCallingConv.cpp
//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
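// Register groups used when passing RISC-V vector tuple types (for example,
// values produced by segment load intrinsics). ArgVRN<N>M<L>s lists the
// candidate groups of N consecutive LMUL=L registers, all drawn from the
// v8-v23 vector argument register range.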
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
                                       RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
                                       RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
                                       RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
                                       RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be used
  // for the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
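// (On RV32 this is typically an i64 argument that type legalisation has
// split into two i32 halves.)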
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have
    // 4-byte alignment. This behavior may be changed when RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

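// Allocate a vector value or vector tuple to the next free RVV argument
// register (or register group) matching its register class. Returns an
// invalid MCRegister if the corresponding argument registers are exhausted.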
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  // Static chain parameter must not be passed in normal argument registers,
  // so we assign t2 for it as done in GCC's __builtin_call_with_static_chain.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

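  // Reaching this point with a floating-point value means no FPR was used
  // (soft-float ABI, variadic argument, or FPRs exhausted); try the
  // Zfinx/Zhinx-style GPR views next, then fall back to plain GPRs or the
  // stack.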
  if ((ValVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zdinx uses a GPR without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP values smaller than XLen use a custom GPR location.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
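  // For example, a variadic double passed under an ILP32 ABI has 8-byte
  // (2*XLEN) alignment, so it must start in an even-numbered argument
  // register (a0, a2, ...), skipping one GPR if necessary.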
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}
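
// Note: unlike a plain CCAssignFn, CC_RISCV and CC_RISCV_FastCC also take
// IsFixed, IsRet and OrigTy, so the RISC-V lowering code calls them once per
// argument or return value from its own analysis helpers rather than through
// the generic calling-convention dispatch.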

// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but theoretically it may benefit some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

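  // For fastcc, FP values may be passed in the full set of caller-saved FPRs:
  // the standard argument registers fa0-fa7 first, then the temporaries
  // ft0-ft7 and ft8-ft11.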
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

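// The GHC calling convention pins GHC's STG virtual registers to callee-saved
// machine registers (s1-s11 and fs0-fs11, or their Zfinx/Zdinx GPR views);
// there is no stack fallback, so running out of registers is a fatal error.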
bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}