LLVM 20.0.0git
RISCVMatInt.cpp
Go to the documentation of this file.
//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.getOpcode()) {
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.getImm());
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}
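// Illustrative cost comparison (added for exposition, not in the original
// source): with this model a fully compressible three-instruction sequence
// costs 3 * 70 = 210 and still loses to a two-instruction uncompressed
// sequence at 2 * 100 = 200, while a fully compressible four-instruction
// sequence (280) beats three uncompressed instructions (300).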

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);

  // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
  if (STI.hasFeature(RISCV::FeatureStdExtZbs) && isPowerOf2_64(Val) &&
      (!isInt<32>(Val) || Val == 0x800)) {
    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
    return;
  }

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
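    //
    // Illustrative example (added, not in the original source): for
    // v = 0x12345678 the code below computes
    // Hi20 = ((v + 0x800) >> 12) & 0xFFFFF = 0x12345 and Lo12 = 0x678,
    // yielding LUI 0x12345 followed by ADDIW 0x678 on RV64 (ADDI on RV32).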
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.emplace_back(RISCV::LUI, Hi20);

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.emplace_back(AddiOpc, Lo12);
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems possible by simply emitting the
  // most significant 32 bits (LUI+ADDIW) followed by as many left shifts
  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
  // performs a sign-extended addition, doing it like that would only be
  // possible when at most 11 bits of the ADDI instructions are used. Using
  // all 12 bits of the ADDI instructions, as done by GAS, actually requires
  // that the constant is processed starting with the least significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
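  //
  // Illustrative trace (added for exposition, not in the original source):
  // for Val = 0x100000001 the lowest 12 bits (Lo12 = 1) are peeled off,
  // leaving 0x100000000; countr_zero gives a shift of 32 and the remaining
  // constant 1 fits in 32 bits, so the recursion emits ADDI 1 and, on
  // return, SLLI 32 and ADDI 1 are appended:
  //   ADDI rd, zero, 1; SLLI rd, rd, 32; ADDI rd, rd, 1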

  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = llvm::countr_zero((uint64_t)Val);
    Val >>= ShiftAmount;

    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12 bits.
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 STI.hasFeature(RISCV::FeatureStdExtZba)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
        ShiftAmount -= 12;
        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
        Unsigned = true;
      }
    }

    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
        STI.hasFeature(RISCV::FeatureStdExtZba)) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
      Val = ((uint64_t)Val) | (0xffffffffull << 32);
      Unsigned = true;
    }
  }

  generateInstSeqImpl(Val, STI, Res);

  // Skip shift if we were able to use LUI directly.
  if (ShiftAmount) {
    unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
    Res.emplace_back(Opc, ShiftAmount);
  }

  if (Lo12)
    Res.emplace_back(RISCV::ADDI, Lo12);
}

static unsigned extractRotateInfo(int64_t Val) {
  // for case: 0b111..1..xxxxxx1..1..
  unsigned LeadingOnes = llvm::countl_one((uint64_t)Val);
  unsigned TrailingOnes = llvm::countr_one((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;

  // for case: 0bxxx1..1..1...xxx
  unsigned UpperTrailingOnes = llvm::countr_one(Hi_32(Val));
  unsigned LowerLeadingOnes = llvm::countl_one(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}
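// Illustrative example (added, not in the original source): for
// Val = 0xFFFFFFFFFFFFF00F there are 52 leading ones and 4 trailing ones,
// so the first case applies and the function returns 64 - 4 = 60. The
// caller in generateInstSeq then loads rotl(Val, 60) = 0xFFFFFFFFFFFFFF00
// (-256, a valid simm12) with ADDI and rotates it back with RORI rd, rd, 60.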

static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                        RISCVMatInt::InstSeq &Res) {
  assert(Val > 0 && "Expected positive val");

  unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
  uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
  // Fill in the bits that will be shifted out with 1s. An example where this
  // helps is trailing one masks with 32 or more ones. This will generate
  // ADDI -1 and an SRLI.
  ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);

  RISCVMatInt::InstSeq TmpSeq;
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // Some cases can benefit from filling the lower bits with zeros instead.
  ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
  TmpSeq.clear();
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
  // the end of the sequence.
  if (LeadingZeros == 32 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    // Try replacing upper bits with 1.
    uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(LeadingOnesVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() ||
        (Res.empty() && TmpSeq.size() < 8)) {
      TmpSeq.emplace_back(RISCV::ADD_UW, 0);
      Res = TmpSeq;
    }
  }
}
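// Illustrative trace (added for exposition, not in the original source):
// for Val = 0x0000000FFFFFFFFF (36 trailing ones) LeadingZeros is 28;
// shifting left and filling the vacated bits with ones yields
// 0xFFFFFFFFFFFFFFFF, which materializes as ADDI rd, zero, -1, and the
// final SRLI rd, rd, 28 restores the mask in just two instructions.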

namespace llvm::RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, STI, Res);

  // If the low 12 bits are non-zero, the first expansion may end with an ADDI
  // or ADDIW. If there are trailing zeros, try generating a sign extended
  // constant with no trailing zeros and use a final SLLI to restore them.
  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
    unsigned TrailingZeros = llvm::countr_zero((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W), prefer that since
    // it's more compressible. But only if LUI+ADDI(W) isn't fusable.
    // NOTE: We don't check for C extension to minimize differences in
    // generated code.
    bool IsShiftedCompressible =
        isInt<6>(ShiftedVal) && !STI.hasFeature(RISCV::TuneLUIADDIFusion);
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
      Res = TmpSeq;
    }
  }
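  // Illustrative example (added, not in the original source): for
  // Val = 0x2A00 the default expansion is LUI 3 + ADDIW -1536, but shifting
  // out the 9 trailing zeros leaves 21, which fits in 6 bits, so
  // ADDI rd, zero, 21 + SLLI rd, rd, 9 is kept instead; both instructions
  // are compressible (C.LI/C.SLLI), assuming LUI+ADDI fusion is not tuned for.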

  // If we have a 1 or 2 instruction sequence this is the best we can do. This
  // will always be true for RV32 and will often be true for RV64.
  if (Res.size() <= 2)
    return Res;

  assert(STI.hasFeature(RISCV::Feature64Bit) &&
         "Expected RV32 to only need 2 instructions");

  // If the lower 13 bits are something like 0x17ff, try to add 1 to change the
  // lower 13 bits to 0x1800. We can restore this with an ADDI of -1 at the end
  // of the sequence. Call generateInstSeqImpl on the new constant which may
  // subtract 0xfffffffffffff800 to create another ADDI. This will leave a
  // constant with more than 12 trailing zeros for the next recursive step.
  if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
    int64_t Imm12 = -(0x800 - (Val & 0xfff));
    int64_t AdjustedVal = Val - Imm12;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(AdjustedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::ADDI, Imm12);
      Res = TmpSeq;
    }
  }
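  // Illustrative arithmetic (added for exposition, not in the original
  // source): if the low 13 bits of Val are 0x17ff, then Imm12 = -1 and
  // AdjustedVal ends in 0x1800; the recursive expansion peels off another
  // ADDI of -0x800, leaving a constant with at least 13 trailing zeros, and
  // the trailing ADDI -1 restores the original value.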

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    generateInstSeqLeadingZeros(Val, STI, Res);
  }

  // If the constant is negative, try inverting it and using our trailing zero
  // optimizations. Use an xori to invert the final value.
  if (Val < 0 && Res.size() > 3) {
    uint64_t InvertedVal = ~(uint64_t)Val;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqLeadingZeros(InvertedVal, STI, TmpSeq);

    // Keep it if we found a sequence that is smaller after inverting.
    if (!TmpSeq.empty() && (TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::XORI, -1);
      Res = TmpSeq;
    }
  }

  // If the low and high halves are the same, use pack. The pack instruction
  // packs the XLEN/2-bit lower halves of rs1 and rs2 into rd, with rs1 in the
  // lower half and rs2 in the upper half.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbkb)) {
    int64_t LoVal = SignExtend64<32>(Val);
    int64_t HiVal = SignExtend64<32>(Val >> 32);
    if (LoVal == HiVal) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(LoVal, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(RISCV::PACK, 0);
        Res = TmpSeq;
      }
    }
  }
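  // Illustrative example (added, not in the original source): for
  // Val = 0x1234567812345678 with Zbkb both 32-bit halves are 0x12345678,
  // so the low half is built with LUI 0x12345 + ADDIW 0x678 and a single
  // PACK rd, rd, rd replicates it into the upper half, three instructions
  // in total.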

  // Perform optimization with BSETI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to
    // zero. Xor that with the original value to get which bits should be set
    // by BSETI.
    uint64_t Lo = Val & 0x7fffffff;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);
    RISCVMatInt::InstSeq TmpSeq;

    if (Lo != 0)
      generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BSETI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }
  }
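  // Illustrative example (added, not in the original source): for
  // Val = 0x8000000000001234 with Zbs, Lo = 0x1234 and Hi has a single set
  // bit (bit 63), so LUI 1 + ADDIW 0x234 + BSETI 63 materializes the value
  // in three instructions instead of a longer shift-based sequence.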

  // Perform optimization with BCLRI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to
    // one. Xor that with the original value to get which bits should be
    // cleared by BCLRI.
    uint64_t Lo = Val | 0xffffffff80000000;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BCLRI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }
  }
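  // Illustrative example (added, not in the original source): for
  // Val = 0xFFFFFEFFFFFF1234 with Zbs, forcing the upper 33 bits to one
  // gives Lo = 0xFFFFFFFFFFFF1234 (LUI 0xFFFF1 + ADDIW 0x234) and Hi has a
  // single set bit (bit 40), so appending BCLRI 40 yields a three-instruction
  // sequence.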

  // Perform optimization with SH*ADD in the Zba extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(Opc, 0);
        Res = TmpSeq;
      }
    } else {
      // Try to use LUI+SH*ADD+ADDI.
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);
      Div = 0;
      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with zero Lo12 (implying Val equals Hi52) should already
        // have been lowered to LUI+SH*ADD by the previous optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, STI, TmpSeq);
        if ((TmpSeq.size() + 2) < Res.size()) {
          TmpSeq.emplace_back(Opc, 0);
          TmpSeq.emplace_back(RISCV::ADDI, Lo12);
          Res = TmpSeq;
        }
      }
    }
  }
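  // Illustrative example (added, not in the original source): a multiple of
  // 5 such as Val = 5 * 0x77777777 = 0x255555553 can be rebuilt as
  // LUI 0x77777 + ADDIW 0x777 (materializing 0x77777777) followed by
  // SH2ADD rd, rd, rd, i.e. rd * 4 + rd; the candidate is only kept if it is
  // shorter than the sequence produced by the earlier steps.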

  // Perform optimization with rori in the Zbb and th.srri in the XTheadBb
  // extension.
  if (Res.size() > 2 && (STI.hasFeature(RISCV::FeatureStdExtZbb) ||
                         STI.hasFeature(RISCV::FeatureVendorXTHeadBb))) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
      TmpSeq.emplace_back(STI.hasFeature(RISCV::FeatureStdExtZbb)
                              ? RISCV::RORI
                              : RISCV::TH_SRRI,
                          Rotate);
      Res = TmpSeq;
    }
  }
  return Res;
}

void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                       MCRegister DestReg, SmallVectorImpl<MCInst> &Insts) {
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);

  MCRegister SrcReg = RISCV::X0;
  for (RISCVMatInt::Inst &Inst : Seq) {
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addImm(Inst.getImm()));
      break;
    case RISCVMatInt::RegX0:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(RISCV::X0));
      break;
    case RISCVMatInt::RegReg:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(SrcReg));
      break;
    case RISCVMatInt::RegImm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addImm(Inst.getImm()));
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DestReg;
  }
}

InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                              unsigned &ShiftAmt, unsigned &AddOpc) {
  int64_t LoVal = SignExtend64<32>(Val);
  if (LoVal == 0)
    return RISCVMatInt::InstSeq();

  // Subtract the LoVal to emulate the effect of the final ADD.
  uint64_t Tmp = (uint64_t)Val - (uint64_t)LoVal;
  assert(Tmp != 0);

  // Use trailing zero counts to figure how far we need to shift LoVal to line
  // up with the remaining constant.
  // TODO: This algorithm assumes all non-zero bits in the low 32 bits of the
  // final constant come from LoVal.
  unsigned TzLo = llvm::countr_zero((uint64_t)LoVal);
  unsigned TzHi = llvm::countr_zero(Tmp);
  assert(TzLo < 32 && TzHi >= 32);
  ShiftAmt = TzHi - TzLo;
  AddOpc = RISCV::ADD;

  if (Tmp == ((uint64_t)LoVal << ShiftAmt))
    return RISCVMatInt::generateInstSeq(LoVal, STI);

  // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)).
  if (STI.hasFeature(RISCV::FeatureStdExtZba) && Lo_32(Val) == Hi_32(Val)) {
    ShiftAmt = 32;
    AddOpc = RISCV::ADD_UW;
    return RISCVMatInt::generateInstSeq(LoVal, STI);
  }

  return RISCVMatInt::InstSeq();
}
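// Illustrative example (added, not in the original source): for
// Val = 0x1234567812345678, LoVal is 0x12345678 and the remaining high part
// equals LoVal << 32, so this returns the two-instruction sequence for LoVal
// (LUI 0x12345 + ADDIW 0x678) with ShiftAmt = 32 and AddOpc = ADD; the caller
// is then expected to combine a shifted copy of that register with itself,
// as hinted by the "final ADD" comment above.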

int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
                  bool CompressionCost, bool FreeZeroes) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
  bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) ||
                                    STI.hasFeature(RISCV::FeatureStdExtZca));
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    if (FreeZeroes && Chunk.getSExtValue() == 0)
      continue;
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), STI);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(FreeZeroes ? 0 : 1, Cost);
}
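// Illustrative example (added, not in the original source): on RV32 with
// CompressionCost and FreeZeroes both false, the 64-bit constant
// 0x0000000100000001 is split into two 32-bit chunks that are each 1; every
// chunk materializes with a single ADDI, so the returned cost is 2.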

OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
  case RISCV::PACK:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::XORI:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
  case RISCV::TH_SRRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace llvm::RISCVMatInt