LLVM 20.0.0git
InstructionSimplify.cpp
1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements routines for folding instructions into simpler forms
10 // that do not require creating new instructions. This does constant folding
11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13 // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
14 // simplified: this is usually true and assuming it simplifies the logic (if
15 // they have not been simplified then results are correct but maybe suboptimal).
16 //
17 //===----------------------------------------------------------------------===//
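// Illustrative usage sketch (editorial addition, not part of the upstream
// file; the variable names are hypothetical): callers normally go through the
// wrappers declared in InstructionSimplify.h rather than the static helpers
// below, e.g.
//
//   SimplifyQuery SQ(F.getParent()->getDataLayout(), &TLI, &DT, &AC);
//   if (Value *V = simplifyInstruction(&I, SQ.getWithInstruction(&I)))
//     I.replaceAllUsesWith(V);
//
// A null return means "no simplification was found", not an error.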
18
19 #include "llvm/Analysis/InstructionSimplify.h"
20
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/CaptureTracking.h"
27 #include "llvm/Analysis/CmpInstAnalysis.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/InstSimplifyFolder.h"
30 #include "llvm/Analysis/LoopAnalysisManager.h"
31 #include "llvm/Analysis/MemoryBuiltins.h"
32 #include "llvm/Analysis/OverflowInstAnalysis.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/Analysis/VectorUtils.h"
36 #include "llvm/IR/ConstantRange.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/InstrTypes.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/Operator.h"
42 #include "llvm/IR/PatternMatch.h"
43 #include "llvm/IR/Statepoint.h"
44 #include "llvm/Support/KnownBits.h"
45 #include <algorithm>
46 #include <optional>
47 using namespace llvm;
48 using namespace llvm::PatternMatch;
49
50 #define DEBUG_TYPE "instsimplify"
51
52 enum { RecursionLimit = 3 };
53
54 STATISTIC(NumExpand, "Number of expansions");
55 STATISTIC(NumReassoc, "Number of reassociations");
56
57 static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
58                               unsigned);
59 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
60 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
61                              const SimplifyQuery &, unsigned);
62 static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
63                             unsigned);
64 static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
65                             const SimplifyQuery &, unsigned);
66 static Value *simplifyCmpInst(CmpPredicate, Value *, Value *,
67                               const SimplifyQuery &, unsigned);
68 static Value *simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
69                                const SimplifyQuery &Q, unsigned MaxRecurse);
70 static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
71 static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
72                               unsigned);
73 static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
74                                unsigned);
75 static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
76                               GEPNoWrapFlags, const SimplifyQuery &, unsigned);
77 static Value *simplifySelectInst(Value *, Value *, Value *,
78                                  const SimplifyQuery &, unsigned);
79 static Value *simplifyInstructionWithOperands(Instruction *I,
80                                               ArrayRef<Value *> NewOps,
81                                               const SimplifyQuery &SQ,
82                                               unsigned MaxRecurse);
83
84 /// For a boolean type or a vector of boolean type, return false or a vector
85 /// with every element false.
86 static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }
87
88 /// For a boolean type or a vector of boolean type, return true or a vector
89 /// with every element true.
90 static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }
91
92 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
93 static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
94   CmpInst *Cmp = dyn_cast<CmpInst>(V);
95   if (!Cmp)
96     return false;
97   CmpInst::Predicate CPred = Cmp->getPredicate();
98   Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
99   if (CPred == Pred && CLHS == LHS && CRHS == RHS)
100     return true;
101   return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
102          CRHS == LHS;
103 }
104
105 /// Simplify comparison with true or false branch of select:
106 /// %sel = select i1 %cond, i32 %tv, i32 %fv
107 /// %cmp = icmp sle i32 %sel, %rhs
108 /// Compose new comparison by substituting %sel with either %tv or %fv
109 /// and see if it simplifies.
110 static Value *simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS,
111                                  Value *Cond, const SimplifyQuery &Q,
112                                  unsigned MaxRecurse, Constant *TrueOrFalse) {
113   Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
114   if (SimplifiedCmp == Cond) {
115     // %cmp simplified to the select condition (%cond).
116     return TrueOrFalse;
117   } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
118     // It didn't simplify. However, if composed comparison is equivalent
119     // to the select condition (%cond) then we can replace it.
120     return TrueOrFalse;
121   }
122   return SimplifiedCmp;
123 }
124
125 /// Simplify comparison with true branch of select
126 static Value *simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS,
127                                      Value *Cond, const SimplifyQuery &Q,
128                                      unsigned MaxRecurse) {
129   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
130                             getTrue(Cond->getType()));
131 }
132
133 /// Simplify comparison with false branch of select
134 static Value *simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS,
135                                       Value *Cond, const SimplifyQuery &Q,
136                                       unsigned MaxRecurse) {
137   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
138                             getFalse(Cond->getType()));
139 }
140
141 /// We know comparison with both branches of select can be simplified, but they
142 /// are not equal. This routine handles some logical simplifications.
143 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
144                                                Value *Cond,
145                                                const SimplifyQuery &Q,
146                                                unsigned MaxRecurse) {
147   // If the false value simplified to false, then the result of the compare
148   // is equal to "Cond && TCmp". This also catches the case when the false
149   // value simplified to false and the true value to true, returning "Cond".
150   // Folding select to and/or isn't poison-safe in general; impliesPoison
151   // checks whether folding it does not convert a well-defined value into
152   // poison.
153   if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
154     if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
155       return V;
156   // If the true value simplified to true, then the result of the compare
157   // is equal to "Cond || FCmp".
158   if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
159     if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
160       return V;
161   // Finally, if the false value simplified to true and the true value to
162   // false, then the result of the compare is equal to "!Cond".
163   if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
164     if (Value *V = simplifyXorInst(
165             Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
166       return V;
167   return nullptr;
168 }
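// Worked example (editorial illustration; %c is a hypothetical value):
//   %sel = select i1 %c, i32 0, i32 1
//   %cmp = icmp eq i32 %sel, 0
// Substituting the two arms gives TCmp = true and FCmp = false, so the
// routine above folds %cmp to "Cond && TCmp", i.e. simply %c.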
169
170 /// Does the given value dominate the specified phi node?
171 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
172   Instruction *I = dyn_cast<Instruction>(V);
173   if (!I)
174     // Arguments and constants dominate all instructions.
175     return true;
176
177   // If we have a DominatorTree then do a precise test.
178   if (DT)
179     return DT->dominates(I, P);
180
181   // Otherwise, if the instruction is in the entry block and is not an invoke,
182   // then it obviously dominates all phi nodes.
183   if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
184       !isa<CallBrInst>(I))
185     return true;
186
187   return false;
188 }
189
190 /// Try to simplify a binary operator of form "V op OtherOp" where V is
191 /// "(B0 opex B1)" by distributing 'op' across 'opex' as
192 /// "(B0 op OtherOp) opex (B1 op OtherOp)".
193 static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
194                           Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
195                           const SimplifyQuery &Q, unsigned MaxRecurse) {
196   auto *B = dyn_cast<BinaryOperator>(V);
197   if (!B || B->getOpcode() != OpcodeToExpand)
198     return nullptr;
199   Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
200   Value *L =
201       simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
202   if (!L)
203     return nullptr;
204   Value *R =
205       simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
206   if (!R)
207     return nullptr;
208
209   // Does the expanded pair of binops simplify to the existing binop?
210   if ((L == B0 && R == B1) ||
211       (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
212     ++NumExpand;
213     return B;
214   }
215
216   // Otherwise, return "L op' R" if it simplifies.
217   Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
218   if (!S)
219     return nullptr;
220
221   ++NumExpand;
222   return S;
223 }
224
225 /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
226 /// distributing op over op'.
227 static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
228                                      Value *R,
229                                      Instruction::BinaryOps OpcodeToExpand,
230                                      const SimplifyQuery &Q,
231                                      unsigned MaxRecurse) {
232   // Recursion is always used, so bail out at once if we already hit the limit.
233   if (!MaxRecurse--)
234     return nullptr;
235
236   if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
237     return V;
238   if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
239     return V;
240   return nullptr;
241 }
242
243/// Generic simplifications for associative binary operations.
244/// Returns the simpler value, or null if none was found.
245staticValue *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
246Value *LHS,Value *RHS,
247constSimplifyQuery &Q,
248unsigned MaxRecurse) {
249assert(Instruction::isAssociative(Opcode) &&"Not an associative operation!");
250
251// Recursion is always used, so bail out at once if we already hit the limit.
252if (!MaxRecurse--)
253returnnullptr;
254
255BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
256BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
257
258// Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
259if (Op0 && Op0->getOpcode() == Opcode) {
260Value *A = Op0->getOperand(0);
261Value *B = Op0->getOperand(1);
262Value *C =RHS;
263
264// Does "B op C" simplify?
265if (Value *V =simplifyBinOp(Opcode,B,C, Q, MaxRecurse)) {
266// It does! Return "A op V" if it simplifies or is already available.
267// If V equals B then "A op V" is just the LHS.
268if (V ==B)
269returnLHS;
270// Otherwise return "A op V" if it simplifies.
271if (Value *W =simplifyBinOp(Opcode,A, V, Q, MaxRecurse)) {
272 ++NumReassoc;
273return W;
274 }
275 }
276 }
277
278// Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
279if (Op1 && Op1->getOpcode() == Opcode) {
280Value *A =LHS;
281Value *B = Op1->getOperand(0);
282Value *C = Op1->getOperand(1);
283
284// Does "A op B" simplify?
285if (Value *V =simplifyBinOp(Opcode,A,B, Q, MaxRecurse)) {
286// It does! Return "V op C" if it simplifies or is already available.
287// If V equals B then "V op C" is just the RHS.
288if (V ==B)
289returnRHS;
290// Otherwise return "V op C" if it simplifies.
291if (Value *W =simplifyBinOp(Opcode, V,C, Q, MaxRecurse)) {
292 ++NumReassoc;
293return W;
294 }
295 }
296 }
297
298// The remaining transforms require commutativity as well as associativity.
299if (!Instruction::isCommutative(Opcode))
300returnnullptr;
301
302// Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
303if (Op0 && Op0->getOpcode() == Opcode) {
304Value *A = Op0->getOperand(0);
305Value *B = Op0->getOperand(1);
306Value *C =RHS;
307
308// Does "C op A" simplify?
309if (Value *V =simplifyBinOp(Opcode,C,A, Q, MaxRecurse)) {
310// It does! Return "V op B" if it simplifies or is already available.
311// If V equals A then "V op B" is just the LHS.
312if (V ==A)
313returnLHS;
314// Otherwise return "V op B" if it simplifies.
315if (Value *W =simplifyBinOp(Opcode, V,B, Q, MaxRecurse)) {
316 ++NumReassoc;
317return W;
318 }
319 }
320 }
321
322// Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
323if (Op1 && Op1->getOpcode() == Opcode) {
324Value *A =LHS;
325Value *B = Op1->getOperand(0);
326Value *C = Op1->getOperand(1);
327
328// Does "C op A" simplify?
329if (Value *V =simplifyBinOp(Opcode,C,A, Q, MaxRecurse)) {
330// It does! Return "B op V" if it simplifies or is already available.
331// If V equals C then "B op V" is just the RHS.
332if (V ==C)
333returnRHS;
334// Otherwise return "B op V" if it simplifies.
335if (Value *W =simplifyBinOp(Opcode,B, V, Q, MaxRecurse)) {
336 ++NumReassoc;
337return W;
338 }
339 }
340 }
341
342returnnullptr;
343}
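// Worked example for the reassociation above (editorial illustration with
// hypothetical values):
//   %n = sub i32 0, %x
//   %t = add i32 %a, %x
//   %r = add i32 %t, %n          ; (%a + %x) + (0 - %x)
// "B op C" is "%x + (0 - %x)", which folds to 0, and "%a + 0" folds to %a,
// so %r simplifies to %a and NumReassoc is bumped.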
344
345/// In the case of a binary operation with a select instruction as an operand,
346/// try to simplify the binop by seeing whether evaluating it on both branches
347/// of the select results in the same value. Returns the common value if so,
348/// otherwise returns null.
349staticValue *threadBinOpOverSelect(Instruction::BinaryOps Opcode,Value *LHS,
350Value *RHS,constSimplifyQuery &Q,
351unsigned MaxRecurse) {
352// Recursion is always used, so bail out at once if we already hit the limit.
353if (!MaxRecurse--)
354returnnullptr;
355
356SelectInst *SI;
357if (isa<SelectInst>(LHS)) {
358 SI = cast<SelectInst>(LHS);
359 }else {
360assert(isa<SelectInst>(RHS) &&"No select instruction operand!");
361 SI = cast<SelectInst>(RHS);
362 }
363
364// Evaluate the BinOp on the true and false branches of the select.
365Value *TV;
366Value *FV;
367if (SI ==LHS) {
368 TV =simplifyBinOp(Opcode, SI->getTrueValue(),RHS, Q, MaxRecurse);
369 FV =simplifyBinOp(Opcode, SI->getFalseValue(),RHS, Q, MaxRecurse);
370 }else {
371 TV =simplifyBinOp(Opcode,LHS, SI->getTrueValue(), Q, MaxRecurse);
372 FV =simplifyBinOp(Opcode,LHS, SI->getFalseValue(), Q, MaxRecurse);
373 }
374
375// If they simplified to the same value, then return the common value.
376// If they both failed to simplify then return null.
377if (TV == FV)
378return TV;
379
380// If one branch simplified to undef, return the other one.
381if (TV && Q.isUndefValue(TV))
382return FV;
383if (FV && Q.isUndefValue(FV))
384return TV;
385
386// If applying the operation did not change the true and false select values,
387// then the result of the binop is the select itself.
388if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
389return SI;
390
391// If one branch simplified and the other did not, and the simplified
392// value is equal to the unsimplified one, return the simplified value.
393// For example, select (cond, X, X & Z) & Z -> X & Z.
394if ((FV && !TV) || (TV && !FV)) {
395// Check that the simplified value has the form "X op Y" where "op" is the
396// same as the original operation.
397Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
398if (Simplified && Simplified->getOpcode() ==unsigned(Opcode) &&
399 !Simplified->hasPoisonGeneratingFlags()) {
400// The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
401// We already know that "op" is the same as for the simplified value. See
402// if the operands match too. If so, return the simplified value.
403Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
404Value *UnsimplifiedLHS = SI ==LHS ? UnsimplifiedBranch :LHS;
405Value *UnsimplifiedRHS = SI ==LHS ?RHS : UnsimplifiedBranch;
406if (Simplified->getOperand(0) == UnsimplifiedLHS &&
407 Simplified->getOperand(1) == UnsimplifiedRHS)
408return Simplified;
409if (Simplified->isCommutative() &&
410 Simplified->getOperand(1) == UnsimplifiedLHS &&
411 Simplified->getOperand(0) == UnsimplifiedRHS)
412return Simplified;
413 }
414 }
415
416returnnullptr;
417}
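// Worked example (editorial illustration with hypothetical values):
//   %sel = select i1 %c, i32 0, i32 4
//   %r   = and i32 %sel, 1
// Both arms fold to 0 ("and i32 0, 1" and "and i32 4, 1"), so the routine
// above returns the common value 0.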
418
419/// In the case of a comparison with a select instruction, try to simplify the
420/// comparison by seeing whether both branches of the select result in the same
421/// value. Returns the common value if so, otherwise returns null.
422/// For example, if we have:
423/// %tmp = select i1 %cmp, i32 1, i32 2
424/// %cmp1 = icmp sle i32 %tmp, 3
425/// We can simplify %cmp1 to true, because both branches of select are
426/// less than 3. We compose new comparison by substituting %tmp with both
427/// branches of select and see if it can be simplified.
428staticValue *threadCmpOverSelect(CmpPredicate Pred,Value *LHS,Value *RHS,
429constSimplifyQuery &Q,unsigned MaxRecurse) {
430// Recursion is always used, so bail out at once if we already hit the limit.
431if (!MaxRecurse--)
432returnnullptr;
433
434// Make sure the select is on the LHS.
435if (!isa<SelectInst>(LHS)) {
436std::swap(LHS,RHS);
437 Pred =CmpInst::getSwappedPredicate(Pred);
438 }
439assert(isa<SelectInst>(LHS) &&"Not comparing with a select instruction!");
440SelectInst *SI = cast<SelectInst>(LHS);
441Value *Cond = SI->getCondition();
442Value *TV = SI->getTrueValue();
443Value *FV = SI->getFalseValue();
444
445// Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
446// Does "cmp TV, RHS" simplify?
447Value *TCmp =simplifyCmpSelTrueCase(Pred, TV,RHS,Cond, Q, MaxRecurse);
448if (!TCmp)
449returnnullptr;
450
451// Does "cmp FV, RHS" simplify?
452Value *FCmp =simplifyCmpSelFalseCase(Pred, FV,RHS,Cond, Q, MaxRecurse);
453if (!FCmp)
454returnnullptr;
455
456// If both sides simplified to the same value, then use it as the result of
457// the original comparison.
458if (TCmp == FCmp)
459return TCmp;
460
461// The remaining cases only make sense if the select condition has the same
462// type as the result of the comparison, so bail out if this is not so.
463if (Cond->getType()->isVectorTy() ==RHS->getType()->isVectorTy())
464returnhandleOtherCmpSelSimplifications(TCmp, FCmp,Cond, Q, MaxRecurse);
465
466returnnullptr;
467}
468
469/// In the case of a binary operation with an operand that is a PHI instruction,
470/// try to simplify the binop by seeing whether evaluating it on the incoming
471/// phi values yields the same result for every value. If so returns the common
472/// value, otherwise returns null.
473staticValue *threadBinOpOverPHI(Instruction::BinaryOps Opcode,Value *LHS,
474Value *RHS,constSimplifyQuery &Q,
475unsigned MaxRecurse) {
476// Recursion is always used, so bail out at once if we already hit the limit.
477if (!MaxRecurse--)
478returnnullptr;
479
480PHINode *PI;
481if (isa<PHINode>(LHS)) {
482 PI = cast<PHINode>(LHS);
483// Bail out if RHS and the phi may be mutually interdependent due to a loop.
484if (!valueDominatesPHI(RHS, PI, Q.DT))
485returnnullptr;
486 }else {
487assert(isa<PHINode>(RHS) &&"No PHI instruction operand!");
488 PI = cast<PHINode>(RHS);
489// Bail out if LHS and the phi may be mutually interdependent due to a loop.
490if (!valueDominatesPHI(LHS, PI, Q.DT))
491returnnullptr;
492 }
493
494// Evaluate the BinOp on the incoming phi values.
495Value *CommonValue =nullptr;
496for (Use &Incoming : PI->incoming_values()) {
497// If the incoming value is the phi node itself, it can safely be skipped.
498if (Incoming == PI)
499continue;
500Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
501Value *V = PI ==LHS
502 ?simplifyBinOp(Opcode,Incoming,RHS,
503 Q.getWithInstruction(InTI), MaxRecurse)
504 :simplifyBinOp(Opcode,LHS,Incoming,
505 Q.getWithInstruction(InTI), MaxRecurse);
506// If the operation failed to simplify, or simplified to a different value
507// to previously, then give up.
508if (!V || (CommonValue && V != CommonValue))
509returnnullptr;
510 CommonValue = V;
511 }
512
513return CommonValue;
514}
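// Worked example (editorial illustration; assumes %x dominates the phi):
//   %p = phi i32 [ %x, %bb1 ], [ 0, %bb2 ]
//   %r = urem i32 %p, %x
// Both incoming evaluations fold to 0 ("urem %x, %x" and "urem 0, %x"), so
// %r simplifies to 0.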
515
516/// In the case of a comparison with a PHI instruction, try to simplify the
517/// comparison by seeing whether comparing with all of the incoming phi values
518/// yields the same result every time. If so returns the common result,
519/// otherwise returns null.
520staticValue *threadCmpOverPHI(CmpPredicate Pred,Value *LHS,Value *RHS,
521constSimplifyQuery &Q,unsigned MaxRecurse) {
522// Recursion is always used, so bail out at once if we already hit the limit.
523if (!MaxRecurse--)
524returnnullptr;
525
526// Make sure the phi is on the LHS.
527if (!isa<PHINode>(LHS)) {
528std::swap(LHS,RHS);
529 Pred =CmpInst::getSwappedPredicate(Pred);
530 }
531assert(isa<PHINode>(LHS) &&"Not comparing with a phi instruction!");
532PHINode *PI = cast<PHINode>(LHS);
533
534// Bail out if RHS and the phi may be mutually interdependent due to a loop.
535if (!valueDominatesPHI(RHS, PI, Q.DT))
536returnnullptr;
537
538// Evaluate the BinOp on the incoming phi values.
539Value *CommonValue =nullptr;
540for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
541Value *Incoming = PI->getIncomingValue(u);
542Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
543// If the incoming value is the phi node itself, it can safely be skipped.
544if (Incoming == PI)
545continue;
546// Change the context instruction to the "edge" that flows into the phi.
547// This is important because that is where incoming is actually "evaluated"
548// even though it is used later somewhere else.
549Value *V =simplifyCmpInst(Pred,Incoming,RHS, Q.getWithInstruction(InTI),
550 MaxRecurse);
551// If the operation failed to simplify, or simplified to a different value
552// to previously, then give up.
553if (!V || (CommonValue && V != CommonValue))
554returnnullptr;
555 CommonValue = V;
556 }
557
558return CommonValue;
559}
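// Worked example (editorial illustration with hypothetical values):
//   %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]
//   %c = icmp ult i32 %p, 4
// Comparing each incoming value against 4 yields true, so %c folds to true.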
560
561staticConstant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
562Value *&Op0,Value *&Op1,
563constSimplifyQuery &Q) {
564if (auto *CLHS = dyn_cast<Constant>(Op0)) {
565if (auto *CRHS = dyn_cast<Constant>(Op1)) {
566switch (Opcode) {
567default:
568break;
569case Instruction::FAdd:
570case Instruction::FSub:
571case Instruction::FMul:
572case Instruction::FDiv:
573case Instruction::FRem:
574if (Q.CxtI !=nullptr)
575returnConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
576 }
577returnConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
578 }
579
580// Canonicalize the constant to the RHS if this is a commutative operation.
581if (Instruction::isCommutative(Opcode))
582std::swap(Op0, Op1);
583 }
584returnnullptr;
585}
586
587/// Given operands for an Add, see if we can fold the result.
588/// If not, this returns null.
589staticValue *simplifyAddInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
590constSimplifyQuery &Q,unsigned MaxRecurse) {
591if (Constant *C =foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
592returnC;
593
594// X + poison -> poison
595if (isa<PoisonValue>(Op1))
596return Op1;
597
598// X + undef -> undef
599if (Q.isUndefValue(Op1))
600return Op1;
601
602// X + 0 -> X
603if (match(Op1,m_Zero()))
604return Op0;
605
606// If two operands are negative, return 0.
607if (isKnownNegation(Op0, Op1))
608returnConstant::getNullValue(Op0->getType());
609
610// X + (Y - X) -> Y
611// (Y - X) + X -> Y
612// Eg: X + -X -> 0
613Value *Y =nullptr;
614if (match(Op1,m_Sub(m_Value(Y),m_Specific(Op0))) ||
615match(Op0,m_Sub(m_Value(Y),m_Specific(Op1))))
616returnY;
617
618// X + ~X -> -1 since ~X = -X-1
619Type *Ty = Op0->getType();
620if (match(Op0,m_Not(m_Specific(Op1))) ||match(Op1,m_Not(m_Specific(Op0))))
621returnConstant::getAllOnesValue(Ty);
622
623// add nsw/nuw (xor Y, signmask), signmask --> Y
624// The no-wrapping add guarantees that the top bit will be set by the add.
625// Therefore, the xor must be clearing the already set sign bit of Y.
626if ((IsNSW || IsNUW) &&match(Op1,m_SignMask()) &&
627match(Op0,m_Xor(m_Value(Y),m_SignMask())))
628returnY;
629
630// add nuw %x, -1 -> -1, because %x can only be 0.
631if (IsNUW &&match(Op1,m_AllOnes()))
632return Op1;// Which is -1.
633
634 /// i1 add -> xor.
635if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
636if (Value *V =simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
637return V;
638
639// Try some generic simplifications for associative operations.
640if (Value *V =
641simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
642return V;
643
644// Threading Add over selects and phi nodes is pointless, so don't bother.
645// Threading over the select in "A + select(cond, B, C)" means evaluating
646// "A+B" and "A+C" and seeing if they are equal; but they are equal if and
647// only if B and C are equal. If B and C are equal then (since we assume
648// that operands have already been simplified) "select(cond, B, C)" should
649// have been simplified to the common value of B and C already. Analysing
650// "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
651// for threading over phi nodes.
652
653returnnullptr;
654}
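// Worked example of the "X + (Y - X) -> Y" fold above (editorial
// illustration with hypothetical values):
//   %d = sub i32 %y, %x
//   %r = add i32 %x, %d          ; %x + (%y - %x) folds to %y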
655
656Value *llvm::simplifyAddInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
657constSimplifyQuery &Query) {
658 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query,RecursionLimit);
659}
660
661/// Compute the base pointer and cumulative constant offsets for V.
662///
663/// This strips all constant offsets off of V, leaving it the base pointer, and
664/// accumulates the total constant offset applied in the returned constant.
665/// It returns zero if there are no constant offsets applied.
666///
667/// This is very similar to stripAndAccumulateConstantOffsets(), except it
668/// normalizes the offset bitwidth to the stripped pointer type, not the
669/// original pointer type.
670staticAPIntstripAndComputeConstantOffsets(constDataLayout &DL,Value *&V,
671bool AllowNonInbounds =false) {
672assert(V->getType()->isPtrOrPtrVectorTy());
673
674APIntOffset =APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
675 V = V->stripAndAccumulateConstantOffsets(DL,Offset, AllowNonInbounds);
676// As that strip may trace through `addrspacecast`, need to sext or trunc
677// the offset calculated.
678returnOffset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
679}
680
681/// Compute the constant difference between two pointer values.
682/// If the difference is not a constant, returns zero.
683staticConstant *computePointerDifference(constDataLayout &DL,Value *LHS,
684Value *RHS) {
685APInt LHSOffset =stripAndComputeConstantOffsets(DL,LHS);
686APInt RHSOffset =stripAndComputeConstantOffsets(DL,RHS);
687
688// If LHS and RHS are not related via constant offsets to the same base
689// value, there is nothing we can do here.
690if (LHS !=RHS)
691returnnullptr;
692
693// Otherwise, the difference of LHS - RHS can be computed as:
694// LHS - RHS
695// = (LHSOffset + Base) - (RHSOffset + Base)
696// = LHSOffset - RHSOffset
697Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
698if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
699 Res =ConstantVector::getSplat(VecTy->getElementCount(), Res);
700return Res;
701}
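// Worked example (editorial illustration with hypothetical values):
//   %pa = getelementptr inbounds i8, ptr %base, i64 12
//   %pb = getelementptr inbounds i8, ptr %base, i64 4
//   %ia = ptrtoint ptr %pa to i64
//   %ib = ptrtoint ptr %pb to i64
//   %d  = sub i64 %ia, %ib
// Both pointers strip to %base with offsets 12 and 4, so the difference is
// the constant 8 (this feeds the ptrtoint fold in simplifySubInst below).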
702
703/// Test if there is a dominating equivalence condition for the
704/// two operands. If there is, try to reduce the binary operation
705/// between the two operands.
706/// Example: Op0 - Op1 --> 0 when Op0 == Op1
707staticValue *simplifyByDomEq(unsigned Opcode,Value *Op0,Value *Op1,
708constSimplifyQuery &Q,unsigned MaxRecurse) {
709// Recursive run it can not get any benefit
710if (MaxRecurse !=RecursionLimit)
711returnnullptr;
712
713 std::optional<bool> Imp =
714isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
715if (Imp && *Imp) {
716Type *Ty = Op0->getType();
717switch (Opcode) {
718case Instruction::Sub:
719case Instruction::Xor:
720case Instruction::URem:
721case Instruction::SRem:
722returnConstant::getNullValue(Ty);
723
724case Instruction::SDiv:
725case Instruction::UDiv:
726return ConstantInt::get(Ty, 1);
727
728case Instruction::And:
729case Instruction::Or:
730// Could be either one - choose Op1 since that's more likely a constant.
731return Op1;
732default:
733break;
734 }
735 }
736returnnullptr;
737}
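// Worked example (editorial illustration with hypothetical values):
//   %cmp = icmp eq i32 %a, %b
//   br i1 %cmp, label %then, label %else
// then:
//   %d = sub i32 %a, %b          ; dominated by the %a == %b edge, folds to 0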
738
739/// Given operands for a Sub, see if we can fold the result.
740/// If not, this returns null.
741staticValue *simplifySubInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
742constSimplifyQuery &Q,unsigned MaxRecurse) {
743if (Constant *C =foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
744returnC;
745
746// X - poison -> poison
747// poison - X -> poison
748if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
749returnPoisonValue::get(Op0->getType());
750
751// X - undef -> undef
752// undef - X -> undef
753if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
754returnUndefValue::get(Op0->getType());
755
756// X - 0 -> X
757if (match(Op1,m_Zero()))
758return Op0;
759
760// X - X -> 0
761if (Op0 == Op1)
762returnConstant::getNullValue(Op0->getType());
763
764// Is this a negation?
765if (match(Op0,m_Zero())) {
766// 0 - X -> 0 if the sub is NUW.
767if (IsNUW)
768returnConstant::getNullValue(Op0->getType());
769
770KnownBits Known =computeKnownBits(Op1,/* Depth */ 0, Q);
771if (Known.Zero.isMaxSignedValue()) {
772// Op1 is either 0 or the minimum signed value. If the sub is NSW, then
773// Op1 must be 0 because negating the minimum signed value is undefined.
774if (IsNSW)
775returnConstant::getNullValue(Op0->getType());
776
777// 0 - X -> X if X is 0 or the minimum signed value.
778return Op1;
779 }
780 }
781
782// (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
783// For example, (X + Y) - Y -> X; (Y + X) - Y -> X
784Value *X =nullptr, *Y =nullptr, *Z = Op1;
785if (MaxRecurse &&match(Op0,m_Add(m_Value(X),m_Value(Y)))) {// (X + Y) - Z
786// See if "V === Y - Z" simplifies.
787if (Value *V =simplifyBinOp(Instruction::Sub,Y, Z, Q, MaxRecurse - 1))
788// It does! Now see if "X + V" simplifies.
789if (Value *W =simplifyBinOp(Instruction::Add,X, V, Q, MaxRecurse - 1)) {
790// It does, we successfully reassociated!
791 ++NumReassoc;
792return W;
793 }
794// See if "V === X - Z" simplifies.
795if (Value *V =simplifyBinOp(Instruction::Sub,X, Z, Q, MaxRecurse - 1))
796// It does! Now see if "Y + V" simplifies.
797if (Value *W =simplifyBinOp(Instruction::Add,Y, V, Q, MaxRecurse - 1)) {
798// It does, we successfully reassociated!
799 ++NumReassoc;
800return W;
801 }
802 }
803
804// X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
805// For example, X - (X + 1) -> -1
806X = Op0;
807if (MaxRecurse &&match(Op1,m_Add(m_Value(Y),m_Value(Z)))) {// X - (Y + Z)
808// See if "V === X - Y" simplifies.
809if (Value *V =simplifyBinOp(Instruction::Sub,X,Y, Q, MaxRecurse - 1))
810// It does! Now see if "V - Z" simplifies.
811if (Value *W =simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
812// It does, we successfully reassociated!
813 ++NumReassoc;
814return W;
815 }
816// See if "V === X - Z" simplifies.
817if (Value *V =simplifyBinOp(Instruction::Sub,X, Z, Q, MaxRecurse - 1))
818// It does! Now see if "V - Y" simplifies.
819if (Value *W =simplifyBinOp(Instruction::Sub, V,Y, Q, MaxRecurse - 1)) {
820// It does, we successfully reassociated!
821 ++NumReassoc;
822return W;
823 }
824 }
825
826// Z - (X - Y) -> (Z - X) + Y if everything simplifies.
827// For example, X - (X - Y) -> Y.
828 Z = Op0;
829if (MaxRecurse &&match(Op1,m_Sub(m_Value(X),m_Value(Y))))// Z - (X - Y)
830// See if "V === Z - X" simplifies.
831if (Value *V =simplifyBinOp(Instruction::Sub, Z,X, Q, MaxRecurse - 1))
832// It does! Now see if "V + Y" simplifies.
833if (Value *W =simplifyBinOp(Instruction::Add, V,Y, Q, MaxRecurse - 1)) {
834// It does, we successfully reassociated!
835 ++NumReassoc;
836return W;
837 }
838
839// trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
840if (MaxRecurse &&match(Op0,m_Trunc(m_Value(X))) &&
841match(Op1,m_Trunc(m_Value(Y))))
842if (X->getType() ==Y->getType())
843// See if "V === X - Y" simplifies.
844if (Value *V =simplifyBinOp(Instruction::Sub,X,Y, Q, MaxRecurse - 1))
845// It does! Now see if "trunc V" simplifies.
846if (Value *W =simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
847 Q, MaxRecurse - 1))
848// It does, return the simplified "trunc V".
849return W;
850
851// Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
852if (match(Op0,m_PtrToInt(m_Value(X))) &&match(Op1,m_PtrToInt(m_Value(Y))))
853if (Constant *Result =computePointerDifference(Q.DL,X,Y))
854returnConstantFoldIntegerCast(Result, Op0->getType(),/*IsSigned*/true,
855 Q.DL);
856
857// i1 sub -> xor.
858if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
859if (Value *V =simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
860return V;
861
862// Threading Sub over selects and phi nodes is pointless, so don't bother.
863// Threading over the select in "A - select(cond, B, C)" means evaluating
864// "A-B" and "A-C" and seeing if they are equal; but they are equal if and
865// only if B and C are equal. If B and C are equal then (since we assume
866// that operands have already been simplified) "select(cond, B, C)" should
867// have been simplified to the common value of B and C already. Analysing
868// "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
869// for threading over phi nodes.
870
871if (Value *V =simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
872return V;
873
874// (sub nuw C_Mask, (xor X, C_Mask)) -> X
875if (IsNUW) {
876Value *X;
877if (match(Op1,m_Xor(m_Value(X),m_Specific(Op0))) &&
878match(Op0,m_LowBitMask()))
879returnX;
880 }
881
882returnnullptr;
883}
884
885Value *llvm::simplifySubInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
886constSimplifyQuery &Q) {
887 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q,RecursionLimit);
888}
889
890/// Given operands for a Mul, see if we can fold the result.
891/// If not, this returns null.
892staticValue *simplifyMulInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
893constSimplifyQuery &Q,unsigned MaxRecurse) {
894if (Constant *C =foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
895returnC;
896
897// X * poison -> poison
898if (isa<PoisonValue>(Op1))
899return Op1;
900
901// X * undef -> 0
902// X * 0 -> 0
903if (Q.isUndefValue(Op1) ||match(Op1,m_Zero()))
904returnConstant::getNullValue(Op0->getType());
905
906// X * 1 -> X
907if (match(Op1,m_One()))
908return Op0;
909
910// (X / Y) * Y -> X if the division is exact.
911Value *X =nullptr;
912if (Q.IIQ.UseInstrInfo &&
913 (match(Op0,
914m_Exact(m_IDiv(m_Value(X),m_Specific(Op1)))) ||// (X / Y) * Y
915match(Op1,m_Exact(m_IDiv(m_Value(X),m_Specific(Op0))))))// Y * (X / Y)
916returnX;
917
918if (Op0->getType()->isIntOrIntVectorTy(1)) {
919// mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
920// representable). All other cases reduce to 0, so just return 0.
921if (IsNSW)
922return ConstantInt::getNullValue(Op0->getType());
923
924// Treat "mul i1" as "and i1".
925if (MaxRecurse)
926if (Value *V =simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
927return V;
928 }
929
930// Try some generic simplifications for associative operations.
931if (Value *V =
932simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
933return V;
934
935// Mul distributes over Add. Try some generic simplifications based on this.
936if (Value *V =expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
937 Instruction::Add, Q, MaxRecurse))
938return V;
939
940// If the operation is with the result of a select instruction, check whether
941// operating on either branch of the select always yields the same value.
942if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
943if (Value *V =
944threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
945return V;
946
947// If the operation is with the result of a phi instruction, check whether
948// operating on all incoming values of the phi always yields the same value.
949if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
950if (Value *V =
951threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
952return V;
953
954returnnullptr;
955}
956
957Value *llvm::simplifyMulInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
958constSimplifyQuery &Q) {
959 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q,RecursionLimit);
960}
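// Worked example of the exact-division fold in simplifyMulInst above
// (editorial illustration with hypothetical values):
//   %q = sdiv exact i32 %x, %y
//   %r = mul i32 %q, %y          ; folds back to %x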
961
962/// Given a predicate and two operands, return true if the comparison is true.
963/// This is a helper for div/rem simplification where we return some other value
964/// when we can prove a relationship between the operands.
965staticboolisICmpTrue(CmpPredicate Pred,Value *LHS,Value *RHS,
966constSimplifyQuery &Q,unsigned MaxRecurse) {
967Value *V =simplifyICmpInst(Pred,LHS,RHS, Q, MaxRecurse);
968Constant *C = dyn_cast_or_null<Constant>(V);
969return (C &&C->isAllOnesValue());
970}
971
972/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
973/// to simplify X % Y to X.
974staticboolisDivZero(Value *X,Value *Y,constSimplifyQuery &Q,
975unsigned MaxRecurse,bool IsSigned) {
976// Recursion is always used, so bail out at once if we already hit the limit.
977if (!MaxRecurse--)
978returnfalse;
979
980if (IsSigned) {
981// (X srem Y) sdiv Y --> 0
982if (match(X,m_SRem(m_Value(),m_Specific(Y))))
983returntrue;
984
985// |X| / |Y| --> 0
986//
987// We require that 1 operand is a simple constant. That could be extended to
988// 2 variables if we computed the sign bit for each.
989//
990// Make sure that a constant is not the minimum signed value because taking
991// the abs() of that is undefined.
992Type *Ty =X->getType();
993constAPInt *C;
994if (match(X,m_APInt(C)) && !C->isMinSignedValue()) {
995// Is the variable divisor magnitude always greater than the constant
996// dividend magnitude?
997// |Y| > |C| --> Y < -abs(C) or Y > abs(C)
998Constant *PosDividendC = ConstantInt::get(Ty,C->abs());
999Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1000if (isICmpTrue(CmpInst::ICMP_SLT,Y, NegDividendC, Q, MaxRecurse) ||
1001isICmpTrue(CmpInst::ICMP_SGT,Y, PosDividendC, Q, MaxRecurse))
1002returntrue;
1003 }
1004if (match(Y,m_APInt(C))) {
1005// Special-case: we can't take the abs() of a minimum signed value. If
1006// that's the divisor, then all we have to do is prove that the dividend
1007// is also not the minimum signed value.
1008if (C->isMinSignedValue())
1009returnisICmpTrue(CmpInst::ICMP_NE,X,Y, Q, MaxRecurse);
1010
1011// Is the variable dividend magnitude always less than the constant
1012// divisor magnitude?
1013// |X| < |C| --> X > -abs(C) and X < abs(C)
1014Constant *PosDivisorC = ConstantInt::get(Ty,C->abs());
1015Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1016if (isICmpTrue(CmpInst::ICMP_SGT,X, NegDivisorC, Q, MaxRecurse) &&
1017isICmpTrue(CmpInst::ICMP_SLT,X, PosDivisorC, Q, MaxRecurse))
1018returntrue;
1019 }
1020returnfalse;
1021 }
1022
1023// IsSigned == false.
1024
1025// Is the unsigned dividend known to be less than a constant divisor?
1026// TODO: Convert this (and above) to range analysis
1027// ("computeConstantRangeIncludingKnownBits")?
1028constAPInt *C;
1029if (match(Y,m_APInt(C)) &&
1030computeKnownBits(X,/* Depth */ 0, Q).getMaxValue().ult(*C))
1031returntrue;
1032
1033// Try again for any divisor:
1034// Is the dividend unsigned less than the divisor?
1035returnisICmpTrue(ICmpInst::ICMP_ULT,X,Y, Q, MaxRecurse);
1036}
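// Worked example (editorial illustration with hypothetical values):
//   %x = and i8 %a, 15
//   %d = udiv i8 %x, 16          ; the maximum possible %x is 15 < 16, so 0
//   %r = urem i8 %x, 16          ; likewise folds to %x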
1037
1038/// Check for common or similar folds of integer division or integer remainder.
1039/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
1040staticValue *simplifyDivRem(Instruction::BinaryOps Opcode,Value *Op0,
1041Value *Op1,constSimplifyQuery &Q,
1042unsigned MaxRecurse) {
1043bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1044bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1045
1046Type *Ty = Op0->getType();
1047
1048// X / undef -> poison
1049// X % undef -> poison
1050if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
1051returnPoisonValue::get(Ty);
1052
1053// X / 0 -> poison
1054// X % 0 -> poison
1055// We don't need to preserve faults!
1056if (match(Op1,m_Zero()))
1057returnPoisonValue::get(Ty);
1058
1059// poison / X -> poison
1060// poison % X -> poison
1061if (isa<PoisonValue>(Op0))
1062return Op0;
1063
1064// undef / X -> 0
1065// undef % X -> 0
1066if (Q.isUndefValue(Op0))
1067returnConstant::getNullValue(Ty);
1068
1069// 0 / X -> 0
1070// 0 % X -> 0
1071if (match(Op0,m_Zero()))
1072returnConstant::getNullValue(Op0->getType());
1073
1074// X / X -> 1
1075// X % X -> 0
1076if (Op0 == Op1)
1077return IsDiv ? ConstantInt::get(Ty, 1) :Constant::getNullValue(Ty);
1078
1079KnownBits Known =computeKnownBits(Op1,/* Depth */ 0, Q);
1080// X / 0 -> poison
1081// X % 0 -> poison
1082// If the divisor is known to be zero, just return poison. This can happen in
1083// some cases where its provable indirectly the denominator is zero but it's
1084// not trivially simplifiable (i.e known zero through a phi node).
1085if (Known.isZero())
1086returnPoisonValue::get(Ty);
1087
1088// X / 1 -> X
1089// X % 1 -> 0
1090// If the divisor can only be zero or one, we can't have division-by-zero
1091// or remainder-by-zero, so assume the divisor is 1.
1092// e.g. 1, zext (i8 X), sdiv X (Y and 1)
1093if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
1094return IsDiv ? Op0 :Constant::getNullValue(Ty);
1095
1096// If X * Y does not overflow, then:
1097// X * Y / Y -> X
1098// X * Y % Y -> 0
1099Value *X;
1100if (match(Op0,m_c_Mul(m_Value(X),m_Specific(Op1)))) {
1101auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1102// The multiplication can't overflow if it is defined not to, or if
1103// X == A / Y for some A.
1104if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1105 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
1106 (IsSigned &&match(X,m_SDiv(m_Value(),m_Specific(Op1)))) ||
1107 (!IsSigned &&match(X,m_UDiv(m_Value(),m_Specific(Op1))))) {
1108return IsDiv ?X :Constant::getNullValue(Op0->getType());
1109 }
1110 }
1111
1112if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1113return IsDiv ?Constant::getNullValue(Op0->getType()) : Op0;
1114
1115if (Value *V =simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
1116return V;
1117
1118// If the operation is with the result of a select instruction, check whether
1119// operating on either branch of the select always yields the same value.
1120if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1121if (Value *V =threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1122return V;
1123
1124// If the operation is with the result of a phi instruction, check whether
1125// operating on all incoming values of the phi always yields the same value.
1126if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1127if (Value *V =threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1128return V;
1129
1130returnnullptr;
1131}
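// Worked example of the no-overflow multiply fold above (editorial
// illustration with hypothetical values):
//   %m = mul nuw i32 %x, %y
//   %d = udiv i32 %m, %y         ; folds to %x
//   %r = urem i32 %m, %y         ; folds to 0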
1132
1133/// These are simplifications common to SDiv and UDiv.
1134staticValue *simplifyDiv(Instruction::BinaryOps Opcode,Value *Op0,Value *Op1,
1135bool IsExact,constSimplifyQuery &Q,
1136unsigned MaxRecurse) {
1137if (Constant *C =foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1138returnC;
1139
1140if (Value *V =simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1141return V;
1142
1143constAPInt *DivC;
1144if (IsExact &&match(Op1,m_APInt(DivC))) {
1145// If this is an exact divide by a constant, then the dividend (Op0) must
1146// have at least as many trailing zeros as the divisor to divide evenly. If
1147// it has less trailing zeros, then the result must be poison.
1148if (DivC->countr_zero()) {
1149KnownBits KnownOp0 =computeKnownBits(Op0,/* Depth */ 0, Q);
1150if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
1151returnPoisonValue::get(Op0->getType());
1152 }
1153
1154// udiv exact (mul nsw X, C), C --> X
1155// sdiv exact (mul nuw X, C), C --> X
1156// where C is not a power of 2.
1157Value *X;
1158if (!DivC->isPowerOf2() &&
1159 (Opcode == Instruction::UDiv
1160 ?match(Op0,m_NSWMul(m_Value(X),m_Specific(Op1)))
1161 :match(Op0,m_NUWMul(m_Value(X),m_Specific(Op1)))))
1162returnX;
1163 }
1164
1165returnnullptr;
1166}
1167
1168/// These are simplifications common to SRem and URem.
1169staticValue *simplifyRem(Instruction::BinaryOps Opcode,Value *Op0,Value *Op1,
1170constSimplifyQuery &Q,unsigned MaxRecurse) {
1171if (Constant *C =foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1172returnC;
1173
1174if (Value *V =simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1175return V;
1176
1177// (X << Y) % X -> 0
1178if (Q.IIQ.UseInstrInfo) {
1179if ((Opcode == Instruction::SRem &&
1180match(Op0,m_NSWShl(m_Specific(Op1),m_Value()))) ||
1181 (Opcode == Instruction::URem &&
1182match(Op0,m_NUWShl(m_Specific(Op1),m_Value()))))
1183returnConstant::getNullValue(Op0->getType());
1184
1185constAPInt *C0;
1186if (match(Op1,m_APInt(C0))) {
1187// (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
1188// (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
1189if (Opcode == Instruction::SRem
1190 ?match(Op0,
1191m_NSWMul(m_Value(),m_CheckedInt([C0](constAPInt &C) {
1192returnC.srem(*C0).isZero();
1193 })))
1194 :match(Op0,
1195m_NUWMul(m_Value(),m_CheckedInt([C0](constAPInt &C) {
1196returnC.urem(*C0).isZero();
1197 }))))
1198returnConstant::getNullValue(Op0->getType());
1199 }
1200 }
1201returnnullptr;
1202}
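// Worked example of the "(X << Y) % X -> 0" fold above (editorial
// illustration with hypothetical values):
//   %s = shl nuw i32 %x, %n
//   %r = urem i32 %s, %x         ; folds to 0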
1203
1204/// Given operands for an SDiv, see if we can fold the result.
1205/// If not, this returns null.
1206staticValue *simplifySDivInst(Value *Op0,Value *Op1,bool IsExact,
1207constSimplifyQuery &Q,unsigned MaxRecurse) {
1208// If two operands are negated and no signed overflow, return -1.
1209if (isKnownNegation(Op0, Op1,/*NeedNSW=*/true))
1210returnConstant::getAllOnesValue(Op0->getType());
1211
1212returnsimplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1213}
1214
1215Value *llvm::simplifySDivInst(Value *Op0,Value *Op1,bool IsExact,
1216constSimplifyQuery &Q) {
1217 return ::simplifySDivInst(Op0, Op1, IsExact, Q,RecursionLimit);
1218}
1219
1220/// Given operands for a UDiv, see if we can fold the result.
1221/// If not, this returns null.
1222staticValue *simplifyUDivInst(Value *Op0,Value *Op1,bool IsExact,
1223constSimplifyQuery &Q,unsigned MaxRecurse) {
1224returnsimplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1225}
1226
1227Value *llvm::simplifyUDivInst(Value *Op0,Value *Op1,bool IsExact,
1228constSimplifyQuery &Q) {
1229 return ::simplifyUDivInst(Op0, Op1, IsExact, Q,RecursionLimit);
1230}
1231
1232/// Given operands for an SRem, see if we can fold the result.
1233/// If not, this returns null.
1234staticValue *simplifySRemInst(Value *Op0,Value *Op1,constSimplifyQuery &Q,
1235unsigned MaxRecurse) {
1236// If the divisor is 0, the result is undefined, so assume the divisor is -1.
1237// srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1238Value *X;
1239if (match(Op1,m_SExt(m_Value(X))) &&X->getType()->isIntOrIntVectorTy(1))
1240return ConstantInt::getNullValue(Op0->getType());
1241
1242// If the two operands are negated, return 0.
1243if (isKnownNegation(Op0, Op1))
1244return ConstantInt::getNullValue(Op0->getType());
1245
1246returnsimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1247}
1248
1249Value *llvm::simplifySRemInst(Value *Op0,Value *Op1,constSimplifyQuery &Q) {
1250 return ::simplifySRemInst(Op0, Op1, Q,RecursionLimit);
1251}
1252
1253/// Given operands for a URem, see if we can fold the result.
1254/// If not, this returns null.
1255staticValue *simplifyURemInst(Value *Op0,Value *Op1,constSimplifyQuery &Q,
1256unsigned MaxRecurse) {
1257returnsimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1258}
1259
1260Value *llvm::simplifyURemInst(Value *Op0,Value *Op1,constSimplifyQuery &Q) {
1261 return ::simplifyURemInst(Op0, Op1, Q,RecursionLimit);
1262}
1263
1264/// Returns true if a shift by \c Amount always yields poison.
1265staticboolisPoisonShift(Value *Amount,constSimplifyQuery &Q) {
1266Constant *C = dyn_cast<Constant>(Amount);
1267if (!C)
1268returnfalse;
1269
1270// X shift by undef -> poison because it may shift by the bitwidth.
1271if (Q.isUndefValue(C))
1272returntrue;
1273
1274// Shifting by the bitwidth or more is poison. This covers scalars and
1275// fixed/scalable vectors with splat constants.
1276constAPInt *AmountC;
1277if (match(C,m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
1278returntrue;
1279
1280// Try harder for fixed-length vectors:
1281// If all lanes of a vector shift are poison, the whole shift is poison.
1282if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1283for (unsignedI = 0,
1284 E = cast<FixedVectorType>(C->getType())->getNumElements();
1285I != E; ++I)
1286if (!isPoisonShift(C->getAggregateElement(I), Q))
1287returnfalse;
1288returntrue;
1289 }
1290
1291returnfalse;
1292}
1293
1294/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1295/// If not, this returns null.
1296staticValue *simplifyShift(Instruction::BinaryOps Opcode,Value *Op0,
1297Value *Op1,bool IsNSW,constSimplifyQuery &Q,
1298unsigned MaxRecurse) {
1299if (Constant *C =foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1300returnC;
1301
1302// poison shift by X -> poison
1303if (isa<PoisonValue>(Op0))
1304return Op0;
1305
1306// 0 shift by X -> 0
1307if (match(Op0,m_Zero()))
1308returnConstant::getNullValue(Op0->getType());
1309
1310// X shift by 0 -> X
1311// Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1312// would be poison.
1313Value *X;
1314if (match(Op1,m_Zero()) ||
1315 (match(Op1,m_SExt(m_Value(X))) &&X->getType()->isIntOrIntVectorTy(1)))
1316return Op0;
1317
1318// Fold undefined shifts.
1319if (isPoisonShift(Op1, Q))
1320returnPoisonValue::get(Op0->getType());
1321
1322// If the operation is with the result of a select instruction, check whether
1323// operating on either branch of the select always yields the same value.
1324if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1325if (Value *V =threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1326return V;
1327
1328// If the operation is with the result of a phi instruction, check whether
1329// operating on all incoming values of the phi always yields the same value.
1330if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1331if (Value *V =threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1332return V;
1333
1334// If any bits in the shift amount make that value greater than or equal to
1335// the number of bits in the type, the shift is undefined.
1336KnownBits KnownAmt =computeKnownBits(Op1,/* Depth */ 0, Q);
1337if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1338returnPoisonValue::get(Op0->getType());
1339
1340// If all valid bits in the shift amount are known zero, the first operand is
1341// unchanged.
1342unsigned NumValidShiftBits =Log2_32_Ceil(KnownAmt.getBitWidth());
1343if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1344return Op0;
1345
1346// Check for nsw shl leading to a poison value.
1347if (IsNSW) {
1348assert(Opcode == Instruction::Shl &&"Expected shl for nsw instruction");
1349KnownBits KnownVal =computeKnownBits(Op0,/* Depth */ 0, Q);
1350KnownBits KnownShl =KnownBits::shl(KnownVal, KnownAmt);
1351
1352if (KnownVal.Zero.isSignBitSet())
1353 KnownShl.Zero.setSignBit();
1354if (KnownVal.One.isSignBitSet())
1355 KnownShl.One.setSignBit();
1356
1357if (KnownShl.hasConflict())
1358returnPoisonValue::get(Op0->getType());
1359 }
1360
1361returnnullptr;
1362}
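// Worked example of the "valid shift bits are zero" fold above (editorial
// illustration with hypothetical values):
//   %amt = and i32 %n, 32        ; the low five bits are known zero
//   %r   = shl i32 %x, %amt      ; folds to %x (a non-zero %amt would be a
//                                ; shift by >= 32, which is poison anyway)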
1363
1364/// Given operands for an LShr or AShr, see if we can fold the result. If not,
1365/// this returns null.
1366staticValue *simplifyRightShift(Instruction::BinaryOps Opcode,Value *Op0,
1367Value *Op1,bool IsExact,
1368constSimplifyQuery &Q,unsigned MaxRecurse) {
1369if (Value *V =
1370simplifyShift(Opcode, Op0, Op1,/*IsNSW*/false, Q, MaxRecurse))
1371return V;
1372
1373// X >> X -> 0
1374if (Op0 == Op1)
1375returnConstant::getNullValue(Op0->getType());
1376
1377// undef >> X -> 0
1378// undef >> X -> undef (if it's exact)
1379if (Q.isUndefValue(Op0))
1380return IsExact ? Op0 :Constant::getNullValue(Op0->getType());
1381
1382// The low bit cannot be shifted out of an exact shift if it is set.
1383// TODO: Generalize by counting trailing zeros (see fold for exact division).
1384if (IsExact) {
1385KnownBits Op0Known =computeKnownBits(Op0,/* Depth */ 0, Q);
1386if (Op0Known.One[0])
1387return Op0;
1388 }
1389
1390returnnullptr;
1391}
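// Note on the exact-shift fold above (editorial addition): if the low bit of
// the shifted value is known to be set, any non-zero shift amount would shift
// a set bit out of an exact shift, which is poison; the only well-defined
// case is a shift by zero, so the result folds to the first operand, e.g.
//   %r = lshr exact i32 %x, %n   ; with bit 0 of %x known set, folds to %x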
1392
1393/// Given operands for an Shl, see if we can fold the result.
1394/// If not, this returns null.
1395staticValue *simplifyShlInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
1396constSimplifyQuery &Q,unsigned MaxRecurse) {
1397if (Value *V =
1398simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1399return V;
1400
1401Type *Ty = Op0->getType();
1402// undef << X -> 0
1403// undef << X -> undef if (if it's NSW/NUW)
1404if (Q.isUndefValue(Op0))
1405return IsNSW || IsNUW ? Op0 :Constant::getNullValue(Ty);
1406
1407// (X >> A) << A -> X
1408Value *X;
1409if (Q.IIQ.UseInstrInfo &&
1410match(Op0,m_Exact(m_Shr(m_Value(X),m_Specific(Op1)))))
1411returnX;
1412
1413// shl nuw i8 C, %x -> C iff C has sign bit set.
1414if (IsNUW &&match(Op0,m_Negative()))
1415return Op0;
1416// NOTE: could use computeKnownBits() / LazyValueInfo,
1417// but the cost-benefit analysis suggests it isn't worth it.
1418
1419// "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
1420// that the sign-bit does not change, so the only input that does not
1421// produce poison is 0, and "0 << (bitwidth-1) --> 0".
1422if (IsNSW && IsNUW &&
1423match(Op1,m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
1424returnConstant::getNullValue(Ty);
1425
1426returnnullptr;
1427}
1428
1429Value *llvm::simplifyShlInst(Value *Op0,Value *Op1,bool IsNSW,bool IsNUW,
1430constSimplifyQuery &Q) {
1431 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q,RecursionLimit);
1432}
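// Worked example of the "(X >> A) << A -> X" fold in simplifyShlInst above
// (editorial illustration with hypothetical values):
//   %s = lshr exact i32 %x, %a
//   %r = shl i32 %s, %a          ; folds to %x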
1433
1434/// Given operands for an LShr, see if we can fold the result.
1435/// If not, this returns null.
1436staticValue *simplifyLShrInst(Value *Op0,Value *Op1,bool IsExact,
1437constSimplifyQuery &Q,unsigned MaxRecurse) {
1438if (Value *V =simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1439 MaxRecurse))
1440return V;
1441
1442// (X << A) >> A -> X
1443Value *X;
1444if (Q.IIQ.UseInstrInfo &&match(Op0,m_NUWShl(m_Value(X),m_Specific(Op1))))
1445returnX;
1446
1447// ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1448// We can return X as we do in the above case since OR alters no bits in X.
1449// SimplifyDemandedBits in InstCombine can do more general optimization for
1450// bit manipulation. This pattern aims to provide opportunities for other
1451// optimizers by supporting a simple but common case in InstSimplify.
1452Value *Y;
1453constAPInt *ShRAmt, *ShLAmt;
1454if (Q.IIQ.UseInstrInfo &&match(Op1,m_APInt(ShRAmt)) &&
1455match(Op0,m_c_Or(m_NUWShl(m_Value(X),m_APInt(ShLAmt)),m_Value(Y))) &&
1456 *ShRAmt == *ShLAmt) {
1457constKnownBits YKnown =computeKnownBits(Y,/* Depth */ 0, Q);
1458constunsigned EffWidthY = YKnown.countMaxActiveBits();
1459if (ShRAmt->uge(EffWidthY))
1460returnX;
1461 }
1462
1463returnnullptr;
1464}
1465
1466Value *llvm::simplifyLShrInst(Value *Op0,Value *Op1,bool IsExact,
1467constSimplifyQuery &Q) {
1468 return ::simplifyLShrInst(Op0, Op1, IsExact, Q,RecursionLimit);
1469}
1470
1471/// Given operands for an AShr, see if we can fold the result.
1472/// If not, this returns null.
1473staticValue *simplifyAShrInst(Value *Op0,Value *Op1,bool IsExact,
1474constSimplifyQuery &Q,unsigned MaxRecurse) {
1475if (Value *V =simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1476 MaxRecurse))
1477return V;
1478
1479// -1 >>a X --> -1
1480// (-1 << X) a>> X --> -1
1481// We could return the original -1 constant to preserve poison elements.
1482if (match(Op0,m_AllOnes()) ||
1483match(Op0,m_Shl(m_AllOnes(),m_Specific(Op1))))
1484returnConstant::getAllOnesValue(Op0->getType());
1485
1486// (X << A) >> A -> X
1487Value *X;
1488if (Q.IIQ.UseInstrInfo &&match(Op0,m_NSWShl(m_Value(X),m_Specific(Op1))))
1489returnX;
1490
1491// Arithmetic shifting an all-sign-bit value is a no-op.
1492unsigned NumSignBits =ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1493if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1494return Op0;
1495
1496returnnullptr;
1497}
1498
1499Value *llvm::simplifyAShrInst(Value *Op0,Value *Op1,bool IsExact,
1500constSimplifyQuery &Q) {
1501 return ::simplifyAShrInst(Op0, Op1, IsExact, Q,RecursionLimit);
1502}
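// Worked example of the all-sign-bits fold in simplifyAShrInst above
// (editorial illustration with hypothetical values):
//   %s = ashr i32 %x, 31         ; every bit of %s is a copy of the sign bit
//   %r = ashr i32 %s, %n         ; folds to %s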
1503
1504/// Commuted variants are assumed to be handled by calling this function again
1505/// with the parameters swapped.
1506staticValue *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1507ICmpInst *UnsignedICmp,bool IsAnd,
1508constSimplifyQuery &Q) {
1509Value *X, *Y;
1510
1511CmpPredicate EqPred;
1512if (!match(ZeroICmp,m_ICmp(EqPred,m_Value(Y),m_Zero())) ||
1513 !ICmpInst::isEquality(EqPred))
1514returnnullptr;
1515
1516CmpPredicate UnsignedPred;
1517
1518Value *A, *B;
1519// Y = (A - B);
1520if (match(Y,m_Sub(m_Value(A),m_Value(B)))) {
1521if (match(UnsignedICmp,
1522m_c_ICmp(UnsignedPred,m_Specific(A),m_Specific(B))) &&
1523 ICmpInst::isUnsigned(UnsignedPred)) {
1524// A >=/<= B || (A - B) != 0 <--> true
1525if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1526 UnsignedPred == ICmpInst::ICMP_ULE) &&
1527 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1528returnConstantInt::getTrue(UnsignedICmp->getType());
1529// A </> B && (A - B) == 0 <--> false
1530if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1531 UnsignedPred == ICmpInst::ICMP_UGT) &&
1532 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1533returnConstantInt::getFalse(UnsignedICmp->getType());
1534
1535// A </> B && (A - B) != 0 <--> A </> B
1536// A </> B || (A - B) != 0 <--> (A - B) != 0
1537if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1538 UnsignedPred == ICmpInst::ICMP_UGT))
1539return IsAnd ? UnsignedICmp : ZeroICmp;
1540
1541// A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1542// A <=/>= B || (A - B) == 0 <--> A <=/>= B
1543if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1544 UnsignedPred == ICmpInst::ICMP_UGE))
1545return IsAnd ? ZeroICmp : UnsignedICmp;
1546 }
1547
1548// Given Y = (A - B)
1549// Y >= A && Y != 0 --> Y >= A iff B != 0
1550// Y < A || Y == 0 --> Y < A iff B != 0
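// (With B != 0: Y u>= A requires the subtraction to wrap, i.e. A u< B, which
// forces Y != 0; and Y == 0 forces A == B != 0, so Y u< A already holds.)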
1551if (match(UnsignedICmp,
1552m_c_ICmp(UnsignedPred,m_Specific(Y),m_Specific(A)))) {
1553if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1554 EqPred == ICmpInst::ICMP_NE &&isKnownNonZero(B, Q))
1555return UnsignedICmp;
1556if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1557 EqPred == ICmpInst::ICMP_EQ &&isKnownNonZero(B, Q))
1558return UnsignedICmp;
1559 }
1560 }
1561
1562if (match(UnsignedICmp,m_ICmp(UnsignedPred,m_Value(X),m_Specific(Y))) &&
1563 ICmpInst::isUnsigned(UnsignedPred))
1564 ;
1565elseif (match(UnsignedICmp,
1566m_ICmp(UnsignedPred,m_Specific(Y),m_Value(X))) &&
1567 ICmpInst::isUnsigned(UnsignedPred))
1568 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1569else
1570returnnullptr;
1571
1572// X > Y && Y == 0 --> Y == 0 iff X != 0
1573// X > Y || Y == 0 --> X > Y iff X != 0
1574if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1575isKnownNonZero(X, Q))
1576return IsAnd ? ZeroICmp : UnsignedICmp;
1577
1578// X <= Y && Y != 0 --> X <= Y iff X != 0
1579// X <= Y || Y != 0 --> Y != 0 iff X != 0
1580if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1581isKnownNonZero(X, Q))
1582return IsAnd ? UnsignedICmp : ZeroICmp;
1583
1584// The transforms below here are expected to be handled more generally with
1585// simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1586// foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1587// these are candidates for removal.
1588
1589// X < Y && Y != 0 --> X < Y
1590// X < Y || Y != 0 --> Y != 0
1591if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1592return IsAnd ? UnsignedICmp : ZeroICmp;
1593
1594// X >= Y && Y == 0 --> Y == 0
1595// X >= Y || Y == 0 --> X >= Y
1596if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1597return IsAnd ? ZeroICmp : UnsignedICmp;
1598
1599// X < Y && Y == 0 --> false
1600if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1601 IsAnd)
1602returngetFalse(UnsignedICmp->getType());
1603
1604// X >= Y || Y != 0 --> true
1605if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1606 !IsAnd)
1607returngetTrue(UnsignedICmp->getType());
1608
1609returnnullptr;
1610}
1611
1612/// Test if a pair of compares with a shared operand and 2 constants has an
1613/// empty set intersection, full set union, or if one compare is a superset of
1614/// the other.
1615staticValue *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0,ICmpInst *Cmp1,
1616bool IsAnd) {
1617// Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
1618if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1619returnnullptr;
1620
1621constAPInt *C0, *C1;
1622if (!match(Cmp0->getOperand(1),m_APInt(C0)) ||
1623 !match(Cmp1->getOperand(1),m_APInt(C1)))
1624returnnullptr;
1625
1626auto Range0 =ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1627auto Range1 =ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1628
1629// For and-of-compares, check if the intersection is empty:
1630// (icmp X, C0) && (icmp X, C1) --> empty set --> false
1631if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1632returngetFalse(Cmp0->getType());
1633
1634// For or-of-compares, check if the union is full:
1635// (icmp X, C0) || (icmp X, C1) --> full set --> true
1636if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1637returngetTrue(Cmp0->getType());
1638
1639// Is one range a superset of the other?
1640// If this is and-of-compares, take the smaller set:
1641// (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1642// If this is or-of-compares, take the larger set:
1643// (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1644if (Range0.contains(Range1))
1645return IsAnd ? Cmp1 : Cmp0;
1646if (Range1.contains(Range0))
1647return IsAnd ? Cmp0 : Cmp1;
1648
1649returnnullptr;
1650}
1651
1652staticValue *simplifyAndOfICmpsWithAdd(ICmpInst *Op0,ICmpInst *Op1,
1653constInstrInfoQuery &IIQ) {
1654// (icmp (add V, C0), C1) & (icmp V, C0)
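// E.g. with nuw: ((add nuw %v, 1) u<= 2) requires %v u<= 1, which contradicts
// (%v u> 1), so the 'and' of the two compares is false.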
1655CmpPredicate Pred0, Pred1;
1656constAPInt *C0, *C1;
1657Value *V;
1658if (!match(Op0,m_ICmp(Pred0,m_Add(m_Value(V),m_APInt(C0)),m_APInt(C1))))
1659returnnullptr;
1660
1661if (!match(Op1,m_ICmp(Pred1,m_Specific(V),m_Value())))
1662returnnullptr;
1663
1664auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1665if (AddInst->getOperand(1) != Op1->getOperand(1))
1666returnnullptr;
1667
1668Type *ITy = Op0->getType();
1669bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1670bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1671
1672constAPInt Delta = *C1 - *C0;
1673if (C0->isStrictlyPositive()) {
1674if (Delta == 2) {
1675if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1676returngetFalse(ITy);
1677if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1678returngetFalse(ITy);
1679 }
1680if (Delta == 1) {
1681if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1682returngetFalse(ITy);
1683if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1684returngetFalse(ITy);
1685 }
1686 }
1687if (C0->getBoolValue() && IsNUW) {
1688if (Delta == 2)
1689if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1690returngetFalse(ITy);
1691if (Delta == 1)
1692if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1693returngetFalse(ITy);
1694 }
1695
1696returnnullptr;
1697}
1698
1699/// Try to simplify and/or of icmp with ctpop intrinsic.
1700staticValue *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0,ICmpInst *Cmp1,
1701bool IsAnd) {
1702CmpPredicate Pred0, Pred1;
1703Value *X;
1704constAPInt *C;
1705if (!match(Cmp0,m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1706m_APInt(C))) ||
1707 !match(Cmp1,m_ICmp(Pred1,m_Specific(X),m_ZeroInt())) ||C->isZero())
1708returnnullptr;
1709
1710// (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1711if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1712return Cmp1;
1713// (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1714if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1715return Cmp1;
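// (Both hold because C > 0: ctpop(X) == C implies X != 0, and conversely
// X == 0 implies ctpop(X) != C.)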
1716
1717returnnullptr;
1718}
1719
1720staticValue *simplifyAndOfICmps(ICmpInst *Op0,ICmpInst *Op1,
1721constSimplifyQuery &Q) {
1722if (Value *X =simplifyUnsignedRangeCheck(Op0, Op1,/*IsAnd=*/true, Q))
1723returnX;
1724if (Value *X =simplifyUnsignedRangeCheck(Op1, Op0,/*IsAnd=*/true, Q))
1725returnX;
1726
1727if (Value *X =simplifyAndOrOfICmpsWithConstants(Op0, Op1,true))
1728returnX;
1729
1730if (Value *X =simplifyAndOrOfICmpsWithCtpop(Op0, Op1,true))
1731returnX;
1732if (Value *X =simplifyAndOrOfICmpsWithCtpop(Op1, Op0,true))
1733returnX;
1734
1735if (Value *X =simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1736returnX;
1737if (Value *X =simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1738returnX;
1739
1740returnnullptr;
1741}
1742
1743staticValue *simplifyOrOfICmpsWithAdd(ICmpInst *Op0,ICmpInst *Op1,
1744constInstrInfoQuery &IIQ) {
1745// (icmp (add V, C0), C1) | (icmp V, C0)
1746CmpPredicate Pred0, Pred1;
1747constAPInt *C0, *C1;
1748Value *V;
1749if (!match(Op0,m_ICmp(Pred0,m_Add(m_Value(V),m_APInt(C0)),m_APInt(C1))))
1750returnnullptr;
1751
1752if (!match(Op1,m_ICmp(Pred1,m_Specific(V),m_Value())))
1753returnnullptr;
1754
1755auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1756if (AddInst->getOperand(1) != Op1->getOperand(1))
1757returnnullptr;
1758
1759Type *ITy = Op0->getType();
1760bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1761bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1762
1763constAPInt Delta = *C1 - *C0;
1764if (C0->isStrictlyPositive()) {
1765if (Delta == 2) {
1766if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1767returngetTrue(ITy);
1768if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1769returngetTrue(ITy);
1770 }
1771if (Delta == 1) {
1772if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1773returngetTrue(ITy);
1774if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1775returngetTrue(ITy);
1776 }
1777 }
1778if (C0->getBoolValue() && IsNUW) {
1779if (Delta == 2)
1780if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1781returngetTrue(ITy);
1782if (Delta == 1)
1783if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1784returngetTrue(ITy);
1785 }
1786
1787returnnullptr;
1788}
1789
1790staticValue *simplifyOrOfICmps(ICmpInst *Op0,ICmpInst *Op1,
1791constSimplifyQuery &Q) {
1792if (Value *X =simplifyUnsignedRangeCheck(Op0, Op1,/*IsAnd=*/false, Q))
1793returnX;
1794if (Value *X =simplifyUnsignedRangeCheck(Op1, Op0,/*IsAnd=*/false, Q))
1795returnX;
1796
1797if (Value *X =simplifyAndOrOfICmpsWithConstants(Op0, Op1,false))
1798returnX;
1799
1800if (Value *X =simplifyAndOrOfICmpsWithCtpop(Op0, Op1,false))
1801returnX;
1802if (Value *X =simplifyAndOrOfICmpsWithCtpop(Op1, Op0,false))
1803returnX;
1804
1805if (Value *X =simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1806returnX;
1807if (Value *X =simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1808returnX;
1809
1810returnnullptr;
1811}
1812
1813staticValue *simplifyAndOrOfFCmps(constSimplifyQuery &Q,FCmpInst *LHS,
1814FCmpInst *RHS,bool IsAnd) {
1815Value *LHS0 =LHS->getOperand(0), *LHS1 =LHS->getOperand(1);
1816Value *RHS0 =RHS->getOperand(0), *RHS1 =RHS->getOperand(1);
1817if (LHS0->getType() != RHS0->getType())
1818returnnullptr;
1819
1820FCmpInst::Predicate PredL =LHS->getPredicate(), PredR =RHS->getPredicate();
1821auto AbsOrSelfLHS0 =m_CombineOr(m_Specific(LHS0),m_FAbs(m_Specific(LHS0)));
1822if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1823 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1824 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1825// (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
1826// (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
1827// (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
1828// (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
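// (An ordered compare involving X or fabs(X) is true only when X is not NaN,
// which is exactly what 'fcmp ord X, 0.0' tests; dually, an unordered compare
// is already true whenever X is NaN.)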
1829if ((match(RHS0, AbsOrSelfLHS0) ||match(RHS1, AbsOrSelfLHS0)) &&
1830match(LHS1,m_PosZeroFP()))
1831return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1832 ?static_cast<Value *>(RHS)
1833 :ConstantInt::getBool(LHS->getType(), !IsAnd);
1834 }
1835
1836auto AbsOrSelfRHS0 =m_CombineOr(m_Specific(RHS0),m_FAbs(m_Specific(RHS0)));
1837if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1838 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1839 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1840// (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
1841// (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
1842// (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
1843// (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
1844if ((match(LHS0, AbsOrSelfRHS0) ||match(LHS1, AbsOrSelfRHS0)) &&
1845match(RHS1,m_PosZeroFP()))
1846return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1847 ?static_cast<Value *>(LHS)
1848 :ConstantInt::getBool(LHS->getType(), !IsAnd);
1849 }
1850
1851returnnullptr;
1852}
1853
1854staticValue *simplifyAndOrOfCmps(constSimplifyQuery &Q,Value *Op0,
1855Value *Op1,bool IsAnd) {
1856// Look through casts of the 'and' operands to find compares.
1857auto *Cast0 = dyn_cast<CastInst>(Op0);
1858auto *Cast1 = dyn_cast<CastInst>(Op1);
1859if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1860 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1861 Op0 = Cast0->getOperand(0);
1862 Op1 = Cast1->getOperand(0);
1863 }
1864
1865Value *V =nullptr;
1866auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1867auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1868if (ICmp0 && ICmp1)
1869 V = IsAnd ?simplifyAndOfICmps(ICmp0, ICmp1, Q)
1870 :simplifyOrOfICmps(ICmp0, ICmp1, Q);
1871
1872auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1873auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1874if (FCmp0 && FCmp1)
1875 V =simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1876
1877if (!V)
1878returnnullptr;
1879if (!Cast0)
1880return V;
1881
1882// If we looked through casts, we can only handle a constant simplification
1883// because we are not allowed to create a cast instruction here.
1884if (auto *C = dyn_cast<Constant>(V))
1885returnConstantFoldCastOperand(Cast0->getOpcode(),C, Cast0->getType(),
1886 Q.DL);
1887
1888returnnullptr;
1889}
1890
1891staticValue *simplifyWithOpReplaced(Value *V,Value *Op,Value *RepOp,
1892constSimplifyQuery &Q,
1893bool AllowRefinement,
1894SmallVectorImpl<Instruction *> *DropFlags,
1895unsigned MaxRecurse);
1896
1897staticValue *simplifyAndOrWithICmpEq(unsigned Opcode,Value *Op0,Value *Op1,
1898constSimplifyQuery &Q,
1899unsigned MaxRecurse) {
1900assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1901"Must be and/or");
1902CmpPredicate Pred;
1903Value *A, *B;
1904if (!match(Op0,m_ICmp(Pred,m_Value(A),m_Value(B))) ||
1905 !ICmpInst::isEquality(Pred))
1906returnnullptr;
1907
1908auto Simplify = [&](Value *Res) ->Value * {
1909Constant *Absorber =ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1910
1911// and (icmp eq a, b), x implies (a==b) inside x.
1912// or (icmp ne a, b), x implies (a==b) inside x.
1913// If x simplifies to true/false, we can simplify the and/or.
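// E.g. and (icmp eq %a, %b), (icmp ult %a, %b) --> false: substituting %b for
// %a turns the second compare into 'icmp ult %b, %b', which simplifies to
// false, the absorber for 'and'.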
1914if (Pred ==
1915 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1916if (Res == Absorber)
1917return Absorber;
1918if (Res ==ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1919return Op0;
1920returnnullptr;
1921 }
1922
1923// If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1924// then we can drop the icmp, as x will already be false in the case where
1925// the icmp is false. Similar for or and true.
1926if (Res == Absorber)
1927return Op1;
1928returnnullptr;
1929 };
1930
1931// In the final case (Res == Absorber with inverted predicate), it is safe to
1932// refine poison during simplification, but not undef. For simplicity always
1933// disable undef-based folds here.
1934if (Value *Res =simplifyWithOpReplaced(Op1,A,B, Q.getWithoutUndef(),
1935/* AllowRefinement */true,
1936/* DropFlags */nullptr, MaxRecurse))
1937return Simplify(Res);
1938if (Value *Res =simplifyWithOpReplaced(Op1,B,A, Q.getWithoutUndef(),
1939/* AllowRefinement */true,
1940/* DropFlags */nullptr, MaxRecurse))
1941return Simplify(Res);
1942
1943returnnullptr;
1944}
1945
1946/// Given a bitwise logic op, check if the operands are add/sub with a common
1947/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1948staticValue *simplifyLogicOfAddSub(Value *Op0,Value *Op1,
1949Instruction::BinaryOps Opcode) {
1950assert(Op0->getType() == Op1->getType() &&"Mismatched binop types");
1951assert(BinaryOperator::isBitwiseLogicOp(Opcode) &&"Expected logic op");
1952Value *X;
1953Constant *C1, *C2;
1954if ((match(Op0,m_Add(m_Value(X),m_Constant(C1))) &&
1955match(Op1,m_Sub(m_Constant(C2),m_Specific(X)))) ||
1956 (match(Op1,m_Add(m_Value(X),m_Constant(C1))) &&
1957match(Op0,m_Sub(m_Constant(C2),m_Specific(X))))) {
1958if (ConstantExpr::getNot(C1) == C2) {
1959// (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1960// (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1961// (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
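// (These hold because, in two's complement, ~C - X == -C - 1 - X == ~(X + C).)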
1962Type *Ty = Op0->getType();
1963return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1964 : ConstantInt::getAllOnesValue(Ty);
1965 }
1966 }
1967returnnullptr;
1968}
1969
1970// Commutative patterns for and that will be tried with both operand orders.
1971staticValue *simplifyAndCommutative(Value *Op0,Value *Op1,
1972constSimplifyQuery &Q,
1973unsigned MaxRecurse) {
1974// ~A & A = 0
1975if (match(Op0,m_Not(m_Specific(Op1))))
1976returnConstant::getNullValue(Op0->getType());
1977
1978// (A | ?) & A = A
1979if (match(Op0,m_c_Or(m_Specific(Op1),m_Value())))
1980return Op1;
1981
1982// (X | ~Y) & (X | Y) --> X
1983Value *X, *Y;
1984if (match(Op0,m_c_Or(m_Value(X),m_Not(m_Value(Y)))) &&
1985match(Op1,m_c_Or(m_Specific(X),m_Specific(Y))))
1986returnX;
1987
1988// If we have a multiplication overflow check that is being 'and'ed with a
1989// check that one of the multipliers is not zero, we can omit the 'and', and
1990// only keep the overflow check.
1991if (isCheckForZeroAndMulWithOverflow(Op0, Op1,true))
1992return Op1;
1993
1994// -A & A = A if A is a power of two or zero.
1995if (match(Op0,m_Neg(m_Specific(Op1))) &&
1996isKnownToBeAPowerOfTwo(Op1, Q.DL,/*OrZero*/true, 0, Q.AC, Q.CxtI, Q.DT))
1997return Op1;
1998
1999// This is similar to the pattern used for checking if a value is a power-of-2:
2000// (A - 1) & A --> 0 (if A is a power-of-2 or 0)
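// E.g. A = 8: (8 - 1) & 8 == 0b0111 & 0b1000 == 0.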
2001if (match(Op0,m_Add(m_Specific(Op1),m_AllOnes())) &&
2002isKnownToBeAPowerOfTwo(Op1, Q.DL,/*OrZero*/true, 0, Q.AC, Q.CxtI, Q.DT))
2003returnConstant::getNullValue(Op1->getType());
2004
2005// (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2006// M <= N.
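// E.g. x = 4, N = 3, M = 1: (4 << 3) & ((4 << 1) - 1) == 32 & 7 == 0.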
2007constAPInt *Shift1, *Shift2;
2008if (match(Op0,m_Shl(m_Value(X),m_APInt(Shift1))) &&
2009match(Op1,m_Add(m_Shl(m_Specific(X),m_APInt(Shift2)),m_AllOnes())) &&
2010isKnownToBeAPowerOfTwo(X, Q.DL,/*OrZero*/true,/*Depth*/ 0, Q.AC,
2011 Q.CxtI) &&
2012 Shift1->uge(*Shift2))
2013returnConstant::getNullValue(Op0->getType());
2014
2015if (Value *V =
2016simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2017return V;
2018
2019returnnullptr;
2020}
2021
2022/// Given operands for an And, see if we can fold the result.
2023/// If not, this returns null.
2024staticValue *simplifyAndInst(Value *Op0,Value *Op1,constSimplifyQuery &Q,
2025unsigned MaxRecurse) {
2026if (Constant *C =foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2027returnC;
2028
2029// X & poison -> poison
2030if (isa<PoisonValue>(Op1))
2031return Op1;
2032
2033// X & undef -> 0
2034if (Q.isUndefValue(Op1))
2035returnConstant::getNullValue(Op0->getType());
2036
2037// X & X = X
2038if (Op0 == Op1)
2039return Op0;
2040
2041// X & 0 = 0
2042if (match(Op1,m_Zero()))
2043returnConstant::getNullValue(Op0->getType());
2044
2045// X & -1 = X
2046if (match(Op1,m_AllOnes()))
2047return Op0;
2048
2049if (Value *Res =simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2050return Res;
2051if (Value *Res =simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2052return Res;
2053
2054if (Value *V =simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2055return V;
2056
2057// A mask that only clears known zeros of a shifted value is a no-op.
2058constAPInt *Mask;
2059constAPInt *ShAmt;
2060Value *X, *Y;
2061if (match(Op1,m_APInt(Mask))) {
2062// If all bits in the inverted and shifted mask are clear:
2063// and (shl X, ShAmt), Mask --> shl X, ShAmt
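// E.g. (i8): and (shl %x, 3), 0xF8 --> shl %x, 3, since the shift already
// cleared the low three bits that the mask would clear.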
2064if (match(Op0,m_Shl(m_Value(X),m_APInt(ShAmt))) &&
2065 (~(*Mask)).lshr(*ShAmt).isZero())
2066return Op0;
2067
2068// If all bits in the inverted and shifted mask are clear:
2069// and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2070if (match(Op0,m_LShr(m_Value(X),m_APInt(ShAmt))) &&
2071 (~(*Mask)).shl(*ShAmt).isZero())
2072return Op0;
2073 }
2074
2075// and 2^x-1, 2^C --> 0 where x <= C.
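// E.g. x = 3, C = 5: (2^3 - 1) & 2^5 == 7 & 32 == 0.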
2076constAPInt *PowerC;
2077Value *Shift;
2078if (match(Op1,m_Power2(PowerC)) &&
2079match(Op0,m_Add(m_Value(Shift),m_AllOnes())) &&
2080isKnownToBeAPowerOfTwo(Shift, Q.DL,/*OrZero*/false, 0, Q.AC, Q.CxtI,
2081 Q.DT)) {
2082KnownBits Known =computeKnownBits(Shift,/* Depth */ 0, Q);
2083// Use getActiveBits() to make use of the additional power of two knowledge
2084if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2085return ConstantInt::getNullValue(Op1->getType());
2086 }
2087
2088if (Value *V =simplifyAndOrOfCmps(Q, Op0, Op1,true))
2089return V;
2090
2091// Try some generic simplifications for associative operations.
2092if (Value *V =
2093simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2094return V;
2095
2096// And distributes over Or. Try some generic simplifications based on this.
2097if (Value *V =expandCommutativeBinOp(Instruction::And, Op0, Op1,
2098 Instruction::Or, Q, MaxRecurse))
2099return V;
2100
2101// And distributes over Xor. Try some generic simplifications based on this.
2102if (Value *V =expandCommutativeBinOp(Instruction::And, Op0, Op1,
2103 Instruction::Xor, Q, MaxRecurse))
2104return V;
2105
2106if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2107if (Op0->getType()->isIntOrIntVectorTy(1)) {
2108// A & (A && B) -> A && B
2109if (match(Op1,m_Select(m_Specific(Op0),m_Value(),m_Zero())))
2110return Op1;
2111elseif (match(Op0,m_Select(m_Specific(Op1),m_Value(),m_Zero())))
2112return Op0;
2113 }
2114// If the operation is with the result of a select instruction, check
2115// whether operating on either branch of the select always yields the same
2116// value.
2117if (Value *V =
2118threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2119return V;
2120 }
2121
2122// If the operation is with the result of a phi instruction, check whether
2123// operating on all incoming values of the phi always yields the same value.
2124if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2125if (Value *V =
2126threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2127return V;
2128
2129// Assuming the effective width of Y is not larger than A, i.e. all bits
2130// from X and Y are disjoint in (X << A) | Y,
2131// if the mask of this AND op covers all bits of X or Y, while it covers
2132// no bits from the other, we can bypass this AND op. E.g.,
2133// ((X << A) | Y) & Mask -> Y,
2134// if Mask = ((1 << effective_width_of(Y)) - 1)
2135// ((X << A) | Y) & Mask -> X << A,
2136// if Mask = ((1 << effective_width_of(X)) - 1) << A
2137// SimplifyDemandedBits in InstCombine can optimize the general case.
2138// This pattern aims to help other passes for a common case.
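// E.g. (i32, A = 16, Y known to fit in 16 bits):
// ((X << 16) | Y) & 0xFFFF --> Y, and ((X << 16) | Y) & 0xFFFF0000 --> X << 16.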
2139Value *XShifted;
2140if (Q.IIQ.UseInstrInfo &&match(Op1,m_APInt(Mask)) &&
2141match(Op0,m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X),m_APInt(ShAmt)),
2142m_Value(XShifted)),
2143m_Value(Y)))) {
2144constunsigned Width = Op0->getType()->getScalarSizeInBits();
2145constunsigned ShftCnt = ShAmt->getLimitedValue(Width);
2146constKnownBits YKnown =computeKnownBits(Y,/* Depth */ 0, Q);
2147constunsigned EffWidthY = YKnown.countMaxActiveBits();
2148if (EffWidthY <= ShftCnt) {
2149constKnownBits XKnown =computeKnownBits(X,/* Depth */ 0, Q);
2150constunsigned EffWidthX = XKnown.countMaxActiveBits();
2151constAPInt EffBitsY =APInt::getLowBitsSet(Width, EffWidthY);
2152constAPInt EffBitsX =APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2153// If the mask is extracting all bits from X or Y as is, we can skip
2154// this AND op.
2155if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2156returnY;
2157if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2158return XShifted;
2159 }
2160 }
2161
2162// ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2163// ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2164BinaryOperator *Or;
2165if (match(Op0,m_c_Xor(m_Value(X),
2166m_CombineAnd(m_BinOp(Or),
2167m_c_Or(m_Deferred(X),m_Value(Y))))) &&
2168match(Op1,m_c_Xor(m_Specific(Or),m_Specific(Y))))
2169returnConstant::getNullValue(Op0->getType());
2170
2171constAPInt *C1;
2172Value *A;
2173// (A ^ C) & (A ^ ~C) -> 0
2174if (match(Op0,m_Xor(m_Value(A),m_APInt(C1))) &&
2175match(Op1,m_Xor(m_Specific(A),m_SpecificInt(~*C1))))
2176returnConstant::getNullValue(Op0->getType());
2177
2178if (Op0->getType()->isIntOrIntVectorTy(1)) {
2179if (std::optional<bool> Implied =isImpliedCondition(Op0, Op1, Q.DL)) {
2180// If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2181if (*Implied ==true)
2182return Op0;
2183// If Op0 is true implies Op1 is false, then they are not true together.
2184if (*Implied ==false)
2185returnConstantInt::getFalse(Op0->getType());
2186 }
2187if (std::optional<bool> Implied =isImpliedCondition(Op1, Op0, Q.DL)) {
2188// If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2189if (*Implied)
2190return Op1;
2191// If Op1 is true implies Op0 is false, then they are not true together.
2192if (!*Implied)
2193returnConstantInt::getFalse(Op1->getType());
2194 }
2195 }
2196
2197if (Value *V =simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2198return V;
2199
2200returnnullptr;
2201}
2202
2203Value *llvm::simplifyAndInst(Value *Op0,Value *Op1,constSimplifyQuery &Q) {
2204 return ::simplifyAndInst(Op0, Op1, Q,RecursionLimit);
2205}
2206
2207// TODO: Many of these folds could use LogicalAnd/LogicalOr.
2208staticValue *simplifyOrLogic(Value *X,Value *Y) {
2209assert(X->getType() ==Y->getType() &&"Expected same type for 'or' ops");
2210Type *Ty =X->getType();
2211
2212// X | ~X --> -1
2213if (match(Y,m_Not(m_Specific(X))))
2214return ConstantInt::getAllOnesValue(Ty);
2215
2216// X | ~(X & ?) = -1
2217if (match(Y,m_Not(m_c_And(m_Specific(X),m_Value()))))
2218return ConstantInt::getAllOnesValue(Ty);
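// (By De Morgan, ~(X & A) == ~X | ~A, so the 'or' above contains X | ~X == -1.)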
2219
2220// X | (X & ?) --> X
2221if (match(Y,m_c_And(m_Specific(X),m_Value())))
2222returnX;
2223
2224Value *A, *B;
2225
2226// (A ^ B) | (A | B) --> A | B
2227// (A ^ B) | (B | A) --> B | A
2228if (match(X,m_Xor(m_Value(A),m_Value(B))) &&
2229match(Y,m_c_Or(m_Specific(A),m_Specific(B))))
2230returnY;
2231
2232// ~(A ^ B) | (A | B) --> -1
2233// ~(A ^ B) | (B | A) --> -1
2234if (match(X,m_Not(m_Xor(m_Value(A),m_Value(B)))) &&
2235match(Y,m_c_Or(m_Specific(A),m_Specific(B))))
2236return ConstantInt::getAllOnesValue(Ty);
2237
2238// (A & ~B) | (A ^ B) --> A ^ B
2239// (~B & A) | (A ^ B) --> A ^ B
2240// (A & ~B) | (B ^ A) --> B ^ A
2241// (~B & A) | (B ^ A) --> B ^ A
2242if (match(X,m_c_And(m_Value(A),m_Not(m_Value(B)))) &&
2243match(Y,m_c_Xor(m_Specific(A),m_Specific(B))))
2244returnY;
2245
2246// (~A ^ B) | (A & B) --> ~A ^ B
2247// (B ^ ~A) | (A & B) --> B ^ ~A
2248// (~A ^ B) | (B & A) --> ~A ^ B
2249// (B ^ ~A) | (B & A) --> B ^ ~A
2250if (match(X,m_c_Xor(m_Not(m_Value(A)),m_Value(B))) &&
2251match(Y,m_c_And(m_Specific(A),m_Specific(B))))
2252returnX;
2253
2254// (~A | B) | (A ^ B) --> -1
2255// (~A | B) | (B ^ A) --> -1
2256// (B | ~A) | (A ^ B) --> -1
2257// (B | ~A) | (B ^ A) --> -1
2258if (match(X,m_c_Or(m_Not(m_Value(A)),m_Value(B))) &&
2259match(Y,m_c_Xor(m_Specific(A),m_Specific(B))))
2260return ConstantInt::getAllOnesValue(Ty);
2261
2262// (~A & B) | ~(A | B) --> ~A
2263// (~A & B) | ~(B | A) --> ~A
2264// (B & ~A) | ~(A | B) --> ~A
2265// (B & ~A) | ~(B | A) --> ~A
2266Value *NotA;
2267if (match(X,m_c_And(m_CombineAnd(m_Value(NotA),m_Not(m_Value(A))),
2268m_Value(B))) &&
2269match(Y,m_Not(m_c_Or(m_Specific(A),m_Specific(B)))))
2270return NotA;
2271// The same is true of Logical And
2272// TODO: This could share the logic of the version above if there was a
2273// version of LogicalAnd that allowed more than just i1 types.
2274if (match(X,m_c_LogicalAnd(m_CombineAnd(m_Value(NotA),m_Not(m_Value(A))),
2275m_Value(B))) &&
2276match(Y,m_Not(m_c_LogicalOr(m_Specific(A),m_Specific(B)))))
2277return NotA;
2278
2279// ~(A ^ B) | (A & B) --> ~(A ^ B)
2280// ~(A ^ B) | (B & A) --> ~(A ^ B)
2281Value *NotAB;
2282if (match(X,m_CombineAnd(m_Not(m_Xor(m_Value(A),m_Value(B))),
2283m_Value(NotAB))) &&
2284match(Y,m_c_And(m_Specific(A),m_Specific(B))))
2285return NotAB;
2286
2287// ~(A & B) | (A ^ B) --> ~(A & B)
2288// ~(A & B) | (B ^ A) --> ~(A & B)
2289if (match(X,m_CombineAnd(m_Not(m_And(m_Value(A),m_Value(B))),
2290m_Value(NotAB))) &&
2291match(Y,m_c_Xor(m_Specific(A),m_Specific(B))))
2292return NotAB;
2293
2294returnnullptr;
2295}
2296
2297/// Given operands for an Or, see if we can fold the result.
2298/// If not, this returns null.
2299staticValue *simplifyOrInst(Value *Op0,Value *Op1,constSimplifyQuery &Q,
2300unsigned MaxRecurse) {
2301if (Constant *C =foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2302returnC;
2303
2304// X | poison -> poison
2305if (isa<PoisonValue>(Op1))
2306return Op1;
2307
2308// X | undef -> -1
2309// X | -1 = -1
2310// Do not return Op1 because it may contain undef elements if it's a vector.
2311if (Q.isUndefValue(Op1) ||match(Op1,m_AllOnes()))
2312returnConstant::getAllOnesValue(Op0->getType());
2313
2314// X | X = X
2315// X | 0 = X
2316if (Op0 == Op1 ||match(Op1,m_Zero()))
2317return Op0;
2318
2319if (Value *R =simplifyOrLogic(Op0, Op1))
2320return R;
2321if (Value *R =simplifyOrLogic(Op1, Op0))
2322return R;
2323
2324if (Value *V =simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2325return V;
2326
2327// Rotated -1 is still -1:
2328// (-1 << X) | (-1 >> (C - X)) --> -1
2329// (-1 >> X) | (-1 << (C - X)) --> -1
2330// ...with C <= bitwidth (and commuted variants).
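// E.g. (i8, C = 8, X = 3): (-1 << 3) | (-1 >> 5) == 0xF8 | 0x07 == 0xFF == -1.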
2331Value *X, *Y;
2332if ((match(Op0,m_Shl(m_AllOnes(),m_Value(X))) &&
2333match(Op1,m_LShr(m_AllOnes(),m_Value(Y)))) ||
2334 (match(Op1,m_Shl(m_AllOnes(),m_Value(X))) &&
2335match(Op0,m_LShr(m_AllOnes(),m_Value(Y))))) {
2336constAPInt *C;
2337if ((match(X,m_Sub(m_APInt(C),m_Specific(Y))) ||
2338match(Y,m_Sub(m_APInt(C),m_Specific(X)))) &&
2339C->ule(X->getType()->getScalarSizeInBits())) {
2340return ConstantInt::getAllOnesValue(X->getType());
2341 }
2342 }
2343
2344// A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2345// are mixing in another shift that is redundant with the funnel shift.
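// (For in-range shift amounts, fshl X, Z, Y == (shl X, Y) | (low bits taken
// from Z), so or'ing with shl X, Y again is redundant; fshr/lshr is analogous.)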
2346
2347// (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2348// (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2349if (match(Op0,
2350 m_Intrinsic<Intrinsic::fshl>(m_Value(X),m_Value(),m_Value(Y))) &&
2351match(Op1,m_Shl(m_Specific(X),m_Specific(Y))))
2352return Op0;
2353if (match(Op1,
2354 m_Intrinsic<Intrinsic::fshl>(m_Value(X),m_Value(),m_Value(Y))) &&
2355match(Op0,m_Shl(m_Specific(X),m_Specific(Y))))
2356return Op1;
2357
2358// (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2359// (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2360if (match(Op0,
2361 m_Intrinsic<Intrinsic::fshr>(m_Value(),m_Value(X),m_Value(Y))) &&
2362match(Op1,m_LShr(m_Specific(X),m_Specific(Y))))
2363return Op0;
2364if (match(Op1,
2365 m_Intrinsic<Intrinsic::fshr>(m_Value(),m_Value(X),m_Value(Y))) &&
2366match(Op0,m_LShr(m_Specific(X),m_Specific(Y))))
2367return Op1;
2368
2369if (Value *V =
2370simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2371return V;
2372if (Value *V =
2373simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2374return V;
2375
2376if (Value *V =simplifyAndOrOfCmps(Q, Op0, Op1,false))
2377return V;
2378
2379// If we have a multiplication overflow check that is being 'and'ed with a
2380// check that one of the multipliers is not zero, we can omit the 'and', and
2381// only keep the overflow check.
2382if (isCheckForZeroAndMulWithOverflow(Op0, Op1,false))
2383return Op1;
2384if (isCheckForZeroAndMulWithOverflow(Op1, Op0,false))
2385return Op0;
2386
2387// Try some generic simplifications for associative operations.
2388if (Value *V =
2389simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2390return V;
2391
2392// Or distributes over And. Try some generic simplifications based on this.
2393if (Value *V =expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2394 Instruction::And, Q, MaxRecurse))
2395return V;
2396
2397if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2398if (Op0->getType()->isIntOrIntVectorTy(1)) {
2399// A | (A || B) -> A || B
2400if (match(Op1,m_Select(m_Specific(Op0),m_One(),m_Value())))
2401return Op1;
2402elseif (match(Op0,m_Select(m_Specific(Op1),m_One(),m_Value())))
2403return Op0;
2404 }
2405// If the operation is with the result of a select instruction, check
2406// whether operating on either branch of the select always yields the same
2407// value.
2408if (Value *V =
2409threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2410return V;
2411 }
2412
2413// (A & C1)|(B & C2)
2414Value *A, *B;
2415constAPInt *C1, *C2;
2416if (match(Op0,m_And(m_Value(A),m_APInt(C1))) &&
2417match(Op1,m_And(m_Value(B),m_APInt(C2)))) {
2418if (*C1 == ~*C2) {
2419// (A & C1)|(B & C2)
2420// If we have: ((V + N) & C1) | (V & C2)
2421// .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2422// replace with V+N.
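// E.g. (i8, C1 = 0xF0, C2 = 0x0F, N = 0x30): adding N leaves the low nibble of
// V unchanged, so ((V + 0x30) & 0xF0) | (V & 0x0F) == V + 0x30.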
2423Value *N;
2424if (C2->isMask() &&// C2 == 0+1+
2425match(A,m_c_Add(m_Specific(B),m_Value(N)))) {
2426// Add commutes, try both ways.
2427if (MaskedValueIsZero(N, *C2, Q))
2428returnA;
2429 }
2430// Or commutes, try both ways.
2431if (C1->isMask() &&match(B,m_c_Add(m_Specific(A),m_Value(N)))) {
2432// Add commutes, try both ways.
2433if (MaskedValueIsZero(N, *C1, Q))
2434returnB;
2435 }
2436 }
2437 }
2438
2439// If the operation is with the result of a phi instruction, check whether
2440// operating on all incoming values of the phi always yields the same value.
2441if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2442if (Value *V =threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2443return V;
2444
2445// (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2446if (match(Op0,m_Xor(m_Value(A),m_APInt(C1))) &&
2447match(Op1,m_Xor(m_Specific(A),m_SpecificInt(~*C1))))
2448returnConstant::getAllOnesValue(Op0->getType());
2449
2450if (Op0->getType()->isIntOrIntVectorTy(1)) {
2451if (std::optional<bool> Implied =
2452isImpliedCondition(Op0, Op1, Q.DL,false)) {
2453// If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2454if (*Implied ==false)
2455return Op0;
2456// If Op0 is false implies Op1 is true, then at least one is always true.
2457if (*Implied ==true)
2458returnConstantInt::getTrue(Op0->getType());
2459 }
2460if (std::optional<bool> Implied =
2461isImpliedCondition(Op1, Op0, Q.DL,false)) {
2462// If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2463if (*Implied ==false)
2464return Op1;
2465// If Op1 is false implies Op0 is true, then at least one is always true.
2466if (*Implied ==true)
2467returnConstantInt::getTrue(Op1->getType());
2468 }
2469 }
2470
2471if (Value *V =simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2472return V;
2473
2474returnnullptr;
2475}
2476
2477Value *llvm::simplifyOrInst(Value *Op0,Value *Op1,constSimplifyQuery &Q) {
2478 return ::simplifyOrInst(Op0, Op1, Q,RecursionLimit);
2479}
2480
2481/// Given operands for a Xor, see if we can fold the result.
2482/// If not, this returns null.
2483staticValue *simplifyXorInst(Value *Op0,Value *Op1,constSimplifyQuery &Q,
2484unsigned MaxRecurse) {
2485if (Constant *C =foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2486returnC;
2487
2488// X ^ poison -> poison
2489if (isa<PoisonValue>(Op1))
2490return Op1;
2491
2492// A ^ undef -> undef
2493if (Q.isUndefValue(Op1))
2494return Op1;
2495
2496// A ^ 0 = A
2497if (match(Op1,m_Zero()))
2498return Op0;
2499
2500// A ^ A = 0
2501if (Op0 == Op1)
2502returnConstant::getNullValue(Op0->getType());
2503
2504// A ^ ~A = ~A ^ A = -1
2505if (match(Op0,m_Not(m_Specific(Op1))) ||match(Op1,m_Not(m_Specific(Op0))))
2506returnConstant::getAllOnesValue(Op0->getType());
2507
2508auto foldAndOrNot = [](Value *X,Value *Y) ->Value * {
2509Value *A, *B;
2510// (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2511if (match(X,m_c_And(m_Not(m_Value(A)),m_Value(B))) &&
2512match(Y,m_c_Or(m_Specific(A),m_Specific(B))))
2513returnA;
2514
2515// (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2516// The 'not' op must contain a complete -1 operand (no undef elements for
2517// vector) for the transform to be safe.
2518Value *NotA;
2519if (match(X,m_c_Or(m_CombineAnd(m_Not(m_Value(A)),m_Value(NotA)),
2520m_Value(B))) &&
2521match(Y,m_c_And(m_Specific(A),m_Specific(B))))
2522return NotA;
2523
2524returnnullptr;
2525 };
2526if (Value *R = foldAndOrNot(Op0, Op1))
2527return R;
2528if (Value *R = foldAndOrNot(Op1, Op0))
2529return R;
2530
2531if (Value *V =simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2532return V;
2533
2534// Try some generic simplifications for associative operations.
2535if (Value *V =
2536simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2537return V;
2538
2539// Threading Xor over selects and phi nodes is pointless, so don't bother.
2540// Threading over the select in "A ^ select(cond, B, C)" means evaluating
2541// "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2542// only if B and C are equal. If B and C are equal then (since we assume
2543// that operands have already been simplified) "select(cond, B, C)" should
2544// have been simplified to the common value of B and C already. Analysing
2545// "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2546// for threading over phi nodes.
2547
2548if (Value *V =simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2549return V;
2550
2551// (xor (sub nuw C_Mask, X), C_Mask) -> X
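// (With C_Mask a low-bit mask and the sub nuw, i.e. X u<= C_Mask, the
// subtraction has no borrows, so sub C_Mask, X == xor C_Mask, X, and xor'ing
// with C_Mask once more restores X.)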
2552 {
2553Value *X;
2554if (match(Op0,m_NUWSub(m_Specific(Op1),m_Value(X))) &&
2555match(Op1,m_LowBitMask()))
2556returnX;
2557 }
2558
2559returnnullptr;
2560}
2561
2562Value *llvm::simplifyXorInst(Value *Op0,Value *Op1,constSimplifyQuery &Q) {
2563 return ::simplifyXorInst(Op0, Op1, Q,RecursionLimit);
2564}
2565
2566staticType *getCompareTy(Value *Op) {
2567returnCmpInst::makeCmpResultType(Op->getType());
2568}
2569
2570/// Rummage around inside V looking for something equivalent to the comparison
2571/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2572/// Helper function for analyzing max/min idioms.
2573staticValue *extractEquivalentCondition(Value *V,CmpPredicate Pred,
2574Value *LHS,Value *RHS) {
2575SelectInst *SI = dyn_cast<SelectInst>(V);
2576if (!SI)
2577returnnullptr;
2578CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2579if (!Cmp)
2580returnnullptr;
2581Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2582if (Pred == Cmp->getPredicate() &&LHS == CmpLHS &&RHS == CmpRHS)
2583return Cmp;
2584if (Pred ==CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2585LHS == CmpRHS &&RHS == CmpLHS)
2586return Cmp;
2587returnnullptr;
2588}
2589
2590/// Return true if the underlying object (storage) must be disjoint from
2591/// storage returned by any noalias return call.
2592staticboolisAllocDisjoint(constValue *V) {
2593// For allocas, we consider only static ones (dynamic
2594// allocas might be transformed into calls to malloc not simultaneously
2595// live with the compared-to allocation). For globals, we exclude symbols
2596// that might be resolved lazily to symbols in another dynamically-loaded
2597// library (and, thus, could be malloc'ed by the implementation).
2598if (constAllocaInst *AI = dyn_cast<AllocaInst>(V))
2599return AI->isStaticAlloca();
2600if (constGlobalValue *GV = dyn_cast<GlobalValue>(V))
2601return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2602 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2603 !GV->isThreadLocal();
2604if (constArgument *A = dyn_cast<Argument>(V))
2605returnA->hasByValAttr();
2606returnfalse;
2607}
2608
2609/// Return true if V1 and V2 are each the base of some distinct storage region
2610/// [V, object_size(V)] which do not overlap. Note that zero sized regions
2611/// *are* possible, and that zero sized regions do not overlap with any other.
2612staticboolhaveNonOverlappingStorage(constValue *V1,constValue *V2) {
2613// Global variables always exist, so they always exist during the lifetime
2614// of each other and all allocas. Global variables themselves usually have
2615// non-overlapping storage, but since their addresses are constants, the
2616// case involving two globals does not reach here and is instead handled in
2617// constant folding.
2618//
2619// Two different allocas usually have different addresses...
2620//
2621// However, if there's an @llvm.stackrestore dynamically in between two
2622// allocas, they may have the same address. It's tempting to reduce the
2623// scope of the problem by only looking at *static* allocas here. That would
2624// cover the majority of allocas while significantly reducing the likelihood
2625// of having an @llvm.stackrestore pop up in the middle. However, it's not
2626// actually impossible for an @llvm.stackrestore to pop up in the middle of
2627// an entry block. Also, if we have a block that's not attached to a
2628// function, we can't tell if it's "static" under the current definition.
2629// Theoretically, this problem could be fixed by creating a new kind of
2630// instruction kind specifically for static allocas. Such a new instruction
2631// could be required to be at the top of the entry block, thus preventing it
2632// from being subject to a @llvm.stackrestore. Instcombine could even
2633// convert regular allocas into these special allocas. It'd be nifty.
2634// However, until then, this problem remains open.
2635//
2636// So, we'll assume that two non-empty allocas have different addresses
2637// for now.
2638auto isByValArg = [](constValue *V) {
2639constArgument *A = dyn_cast<Argument>(V);
2640returnA &&A->hasByValAttr();
2641 };
2642
2643// Byval args are backed by store which does not overlap with each other,
2644// allocas, or globals.
2645if (isByValArg(V1))
2646return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2647if (isByValArg(V2))
2648return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2649
2650return isa<AllocaInst>(V1) &&
2651 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2652}
2653
2654// A significant optimization not implemented here is assuming that alloca
2655// addresses are not equal to incoming argument values. They don't *alias*,
2656// as we say, but that doesn't mean they aren't equal, so we take a
2657// conservative approach.
2658//
2659// This is inspired in part by C++11 5.10p1:
2660// "Two pointers of the same type compare equal if and only if they are both
2661// null, both point to the same function, or both represent the same
2662// address."
2663//
2664// This is pretty permissive.
2665//
2666// It's also partly due to C11 6.5.9p6:
2667// "Two pointers compare equal if and only if both are null pointers, both are
2668// pointers to the same object (including a pointer to an object and a
2669// subobject at its beginning) or function, both are pointers to one past the
2670// last element of the same array object, or one is a pointer to one past the
2671// end of one array object and the other is a pointer to the start of a
2672// different array object that happens to immediately follow the first array
2673// object in the address space.)
2674//
2675// C11's version is more restrictive; however, there's no reason why an argument
2676// couldn't be a one-past-the-end value for a stack object in the caller and be
2677// equal to the beginning of a stack object in the callee.
2678//
2679// If the C and C++ standards are ever made sufficiently restrictive in this
2680// area, it may be possible to update LLVM's semantics accordingly and reinstate
2681// this optimization.
2682staticConstant *computePointerICmp(CmpPredicate Pred,Value *LHS,Value *RHS,
2683constSimplifyQuery &Q) {
2684assert(LHS->getType() ==RHS->getType() &&"Must have same types");
2685constDataLayout &DL = Q.DL;
2686constTargetLibraryInfo *TLI = Q.TLI;
2687
2688// We can only fold certain predicates on pointer comparisons.
2689switch (Pred) {
2690default:
2691returnnullptr;
2692
2693// Equality comparisons are easy to fold.
2694caseCmpInst::ICMP_EQ:
2695caseCmpInst::ICMP_NE:
2696break;
2697
2698// We can only handle unsigned relational comparisons because 'inbounds' on
2699// a GEP only protects against unsigned wrapping.
2700caseCmpInst::ICMP_UGT:
2701caseCmpInst::ICMP_UGE:
2702caseCmpInst::ICMP_ULT:
2703caseCmpInst::ICMP_ULE:
2704// However, we have to switch them to their signed variants to handle
2705// negative indices from the base pointer.
2706 Pred =ICmpInst::getSignedPredicate(Pred);
2707break;
2708 }
2709
2710// Strip off any constant offsets so that we can reason about them.
2711// It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2712// here and compare base addresses like AliasAnalysis does, however there are
2713// numerous hazards. AliasAnalysis and its utilities rely on special rules
2714// governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2715// doesn't need to guarantee pointer inequality when it says NoAlias.
2716
2717// Even if a non-inbounds GEP occurs along the path we can still optimize
2718// equality comparisons concerning the result.
2719bool AllowNonInbounds =ICmpInst::isEquality(Pred);
2720unsigned IndexSize =DL.getIndexTypeSizeInBits(LHS->getType());
2721APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2722LHS =LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2723RHS =RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2724
2725// If LHS and RHS are related via constant offsets to the same base
2726// value, we can replace it with an icmp which just compares the offsets.
2727if (LHS ==RHS)
2728return ConstantInt::get(getCompareTy(LHS),
2729ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2730
2731// Various optimizations for (in)equality comparisons.
2732if (Pred ==CmpInst::ICMP_EQ || Pred ==CmpInst::ICMP_NE) {
2733// Different non-empty allocations that exist at the same time have
2734// different addresses (if the program can tell). If the offsets are
2735// within the bounds of their allocations (and not one-past-the-end!
2736// so we can't use inbounds!), and their allocations aren't the same,
2737// the pointers are not equal.
2738if (haveNonOverlappingStorage(LHS,RHS)) {
2739uint64_t LHSSize, RHSSize;
2740ObjectSizeOpts Opts;
2741 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2742auto *F = [](Value *V) ->Function * {
2743if (auto *I = dyn_cast<Instruction>(V))
2744returnI->getFunction();
2745if (auto *A = dyn_cast<Argument>(V))
2746returnA->getParent();
2747returnnullptr;
2748 }(LHS);
2749 Opts.NullIsUnknownSize =F ?NullPointerIsDefined(F) :true;
2750if (getObjectSize(LHS, LHSSize,DL, TLI, Opts) && LHSSize != 0 &&
2751getObjectSize(RHS, RHSSize,DL, TLI, Opts) && RHSSize != 0) {
2752APInt Dist = LHSOffset - RHSOffset;
2753if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2754return ConstantInt::get(getCompareTy(LHS),
2755 !CmpInst::isTrueWhenEqual(Pred));
2756 }
2757 }
2758
2759// If one side of the equality comparison must come from a noalias call
2760// (meaning a system memory allocation function), and the other side must
2761// come from a pointer that cannot overlap with dynamically-allocated
2762// memory within the lifetime of the current function (allocas, byval
2763// arguments, globals), then determine the comparison result here.
2764SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2765getUnderlyingObjects(LHS, LHSUObjs);
2766getUnderlyingObjects(RHS, RHSUObjs);
2767
2768// Is the set of underlying objects all noalias calls?
2769auto IsNAC = [](ArrayRef<const Value *> Objects) {
2770returnall_of(Objects,isNoAliasCall);
2771 };
2772
2773// Is the set of underlying objects all things which must be disjoint from
2774// noalias calls? We assume that indexing from such disjoint storage
2775// into the heap is undefined, and thus offsets can be safely ignored.
2776auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2777returnall_of(Objects,::isAllocDisjoint);
2778 };
2779
2780if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2781 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2782return ConstantInt::get(getCompareTy(LHS),
2783 !CmpInst::isTrueWhenEqual(Pred));
2784
2785// Fold comparisons for a non-escaping pointer even if the allocation call
2786// cannot be elided. We cannot fold a malloc comparison to null. Also, the
2787// dynamic allocation call could be either of the operands. Note that
2788// the other operand cannot be based on the alloc - if it were, then
2789// the cmp itself would be a capture.
2790Value *MI =nullptr;
2791if (isAllocLikeFn(LHS, TLI) &&llvm::isKnownNonZero(RHS, Q))
2792MI =LHS;
2793elseif (isAllocLikeFn(RHS, TLI) &&llvm::isKnownNonZero(LHS, Q))
2794MI =RHS;
2795if (MI) {
2796// FIXME: This is incorrect, see PR54002. While we can assume that the
2797// allocation is at an address that makes the comparison false, this
2798// requires that *all* comparisons to that address be false, which
2799// InstSimplify cannot guarantee.
2800structCustomCaptureTracker :publicCaptureTracker {
2801bool Captured =false;
2802voidtooManyUses() override{ Captured =true; }
2803boolcaptured(constUse *U) override{
2804if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2805// Comparison against a value loaded from a global variable. Given that
2806// the pointer does not escape, its value cannot have been guessed and
2807// stored separately in a global variable.
2808unsigned OtherIdx = 1 - U->getOperandNo();
2809auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2810if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2811returnfalse;
2812 }
2813
2814 Captured =true;
2815returntrue;
2816 }
2817 };
2818 CustomCaptureTracker Tracker;
2819PointerMayBeCaptured(MI, &Tracker);
2820if (!Tracker.Captured)
2821return ConstantInt::get(getCompareTy(LHS),
2822CmpInst::isFalseWhenEqual(Pred));
2823 }
2824 }
2825
2826// Otherwise, fail.
2827returnnullptr;
2828}
2829
2830/// Fold an icmp when its operands have i1 scalar type.
2831staticValue *simplifyICmpOfBools(CmpPredicate Pred,Value *LHS,Value *RHS,
2832constSimplifyQuery &Q) {
2833Type *ITy =getCompareTy(LHS);// The return type.
2834Type *OpTy =LHS->getType();// The operand type.
2835if (!OpTy->isIntOrIntVectorTy(1))
2836returnnullptr;
2837
2838// A boolean compared to true/false can be reduced in 14 out of the 20
2839// (10 predicates * 2 constants) possible combinations. The other
2840// 6 cases require a 'not' of the LHS.
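// E.g. 'X == false' would require producing not(X), which we cannot create
// here; but when the LHS is itself a 'not' (xor %y, true), ExtractNotLHS below
// lets the compare fold to %y.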
2841
2842auto ExtractNotLHS = [](Value *V) ->Value * {
2843Value *X;
2844if (match(V,m_Not(m_Value(X))))
2845returnX;
2846returnnullptr;
2847 };
2848
2849if (match(RHS,m_Zero())) {
2850switch (Pred) {
2851caseCmpInst::ICMP_NE:// X != 0 -> X
2852caseCmpInst::ICMP_UGT:// X >u 0 -> X
2853caseCmpInst::ICMP_SLT:// X <s 0 -> X
2854returnLHS;
2855
2856caseCmpInst::ICMP_EQ:// not(X) == 0 -> X != 0 -> X
2857caseCmpInst::ICMP_ULE:// not(X) <=u 0 -> X >u 0 -> X
2858caseCmpInst::ICMP_SGE:// not(X) >=s 0 -> X <s 0 -> X
2859if (Value *X = ExtractNotLHS(LHS))
2860returnX;
2861break;
2862
2863caseCmpInst::ICMP_ULT:// X <u 0 -> false
2864caseCmpInst::ICMP_SGT:// X >s 0 -> false
2865returngetFalse(ITy);
2866
2867caseCmpInst::ICMP_UGE:// X >=u 0 -> true
2868caseCmpInst::ICMP_SLE:// X <=s 0 -> true
2869returngetTrue(ITy);
2870
2871default:
2872break;
2873 }
2874 }elseif (match(RHS,m_One())) {
2875switch (Pred) {
2876caseCmpInst::ICMP_EQ:// X == 1 -> X
2877caseCmpInst::ICMP_UGE:// X >=u 1 -> X
2878caseCmpInst::ICMP_SLE:// X <=s -1 -> X
2879returnLHS;
2880
2881caseCmpInst::ICMP_NE:// not(X) != 1 -> X == 1 -> X
2882caseCmpInst::ICMP_ULT:// not(X) <u 1 -> X >=u 1 -> X
2883caseCmpInst::ICMP_SGT:// not(X) >s -1 -> X <=s -1 -> X
2884if (Value *X = ExtractNotLHS(LHS))
2885returnX;
2886break;
2887
2888caseCmpInst::ICMP_UGT:// X >u 1 -> false
2889caseCmpInst::ICMP_SLT:// X <s -1 -> false
2890returngetFalse(ITy);
2891
2892caseCmpInst::ICMP_ULE:// X <=u 1 -> true
2893caseCmpInst::ICMP_SGE:// X >=s -1 -> true
2894returngetTrue(ITy);
2895
2896default:
2897break;
2898 }
2899 }
2900
2901switch (Pred) {
2902default:
2903break;
2904case ICmpInst::ICMP_UGE:
2905if (isImpliedCondition(RHS,LHS, Q.DL).value_or(false))
2906returngetTrue(ITy);
2907break;
2908case ICmpInst::ICMP_SGE:
2909 /// For signed comparison, the values for an i1 are 0 and -1
2910 /// respectively. This maps into a truth table of:
2911 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2912 /// 0 | 0 | 1 (0 >= 0) | 1
2913 /// 0 | 1 | 1 (0 >= -1) | 1
2914 /// 1 | 0 | 0 (-1 >= 0) | 0
2915 /// 1 | 1 | 1 (-1 >= -1) | 1
2916if (isImpliedCondition(LHS,RHS, Q.DL).value_or(false))
2917returngetTrue(ITy);
2918break;
2919case ICmpInst::ICMP_ULE:
2920if (isImpliedCondition(LHS,RHS, Q.DL).value_or(false))
2921returngetTrue(ITy);
2922break;
2923case ICmpInst::ICMP_SLE:
2924 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2925if (isImpliedCondition(RHS,LHS, Q.DL).value_or(false))
2926returngetTrue(ITy);
2927break;
2928 }
2929
2930returnnullptr;
2931}
2932
2933/// Try hard to fold icmp with zero RHS because this is a common case.
2934staticValue *simplifyICmpWithZero(CmpPredicate Pred,Value *LHS,Value *RHS,
2935constSimplifyQuery &Q) {
2936if (!match(RHS,m_Zero()))
2937returnnullptr;
2938
2939Type *ITy =getCompareTy(LHS);// The return type.
2940switch (Pred) {
2941default:
2942llvm_unreachable("Unknown ICmp predicate!");
2943case ICmpInst::ICMP_ULT:
2944returngetFalse(ITy);
2945case ICmpInst::ICMP_UGE:
2946returngetTrue(ITy);
2947case ICmpInst::ICMP_EQ:
2948case ICmpInst::ICMP_ULE:
2949if (isKnownNonZero(LHS, Q))
2950returngetFalse(ITy);
2951break;
2952case ICmpInst::ICMP_NE:
2953case ICmpInst::ICMP_UGT:
2954if (isKnownNonZero(LHS, Q))
2955returngetTrue(ITy);
2956break;
2957case ICmpInst::ICMP_SLT: {
2958KnownBits LHSKnown =computeKnownBits(LHS,/* Depth */ 0, Q);
2959if (LHSKnown.isNegative())
2960returngetTrue(ITy);
2961if (LHSKnown.isNonNegative())
2962returngetFalse(ITy);
2963break;
2964 }
2965case ICmpInst::ICMP_SLE: {
2966KnownBits LHSKnown =computeKnownBits(LHS,/* Depth */ 0, Q);
2967if (LHSKnown.isNegative())
2968returngetTrue(ITy);
2969if (LHSKnown.isNonNegative() &&isKnownNonZero(LHS, Q))
2970returngetFalse(ITy);
2971break;
2972 }
2973case ICmpInst::ICMP_SGE: {
2974KnownBits LHSKnown =computeKnownBits(LHS,/* Depth */ 0, Q);
2975if (LHSKnown.isNegative())
2976returngetFalse(ITy);
2977if (LHSKnown.isNonNegative())
2978returngetTrue(ITy);
2979break;
2980 }
2981case ICmpInst::ICMP_SGT: {
2982KnownBits LHSKnown =computeKnownBits(LHS,/* Depth */ 0, Q);
2983if (LHSKnown.isNegative())
2984returngetFalse(ITy);
2985if (LHSKnown.isNonNegative() &&isKnownNonZero(LHS, Q))
2986returngetTrue(ITy);
2987break;
2988 }
2989 }
2990
2991returnnullptr;
2992}
2993
2994staticValue *simplifyICmpWithConstant(CmpPredicate Pred,Value *LHS,
2995Value *RHS,constInstrInfoQuery &IIQ) {
2996Type *ITy =getCompareTy(RHS);// The return type.
2997
2998Value *X;
2999constAPInt *C;
3000if (!match(RHS,m_APIntAllowPoison(C)))
3001returnnullptr;
3002
3003// Sign-bit checks can be optimized to true/false after unsigned
3004// floating-point casts:
3005// icmp slt (bitcast (uitofp X)), 0 --> false
3006// icmp sgt (bitcast (uitofp X)), -1 --> true
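// (uitofp never produces a negative value, so the sign bit of the bitcast
// result is always clear.)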
3007if (match(LHS,m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
3008bool TrueIfSigned;
3009if (isSignBitCheck(Pred, *C, TrueIfSigned))
3010returnConstantInt::getBool(ITy, !TrueIfSigned);
3011 }
3012
3013// Rule out tautological comparisons (e.g., ult 0 or uge 0).
3014ConstantRange RHS_CR =ConstantRange::makeExactICmpRegion(Pred, *C);
3015if (RHS_CR.isEmptySet())
3016returnConstantInt::getFalse(ITy);
3017if (RHS_CR.isFullSet())
3018returnConstantInt::getTrue(ITy);
3019
3020ConstantRange LHS_CR =
3021computeConstantRange(LHS,CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3022if (!LHS_CR.isFullSet()) {
3023if (RHS_CR.contains(LHS_CR))
3024returnConstantInt::getTrue(ITy);
3025if (RHS_CR.inverse().contains(LHS_CR))
3026returnConstantInt::getFalse(ITy);
3027 }
3028
3029// (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3030// (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
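// E.g. (mul nuw %x, 4) == 6 --> false, since a non-wrapping multiple of 4 can
// never equal 6.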
3031constAPInt *MulC;
3032if (IIQ.UseInstrInfo &&ICmpInst::isEquality(Pred) &&
3033 ((match(LHS,m_NUWMul(m_Value(),m_APIntAllowPoison(MulC))) &&
3034 *MulC != 0 &&C->urem(*MulC) != 0) ||
3035 (match(LHS,m_NSWMul(m_Value(),m_APIntAllowPoison(MulC))) &&
3036 *MulC != 0 &&C->srem(*MulC) != 0)))
3037return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3038
3039returnnullptr;
3040}
3041
3042enum classMonotonicType {GreaterEq,LowerEq };
3043
3044/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
3045staticvoidgetUnsignedMonotonicValues(SmallPtrSetImpl<Value *> &Res,Value *V,
3046MonotonicTypeType,unsignedDepth = 0) {
3047if (!Res.insert(V).second)
3048return;
3049
3050// Can be increased if useful.
3051if (++Depth > 1)
3052return;
3053
3054auto *I = dyn_cast<Instruction>(V);
3055if (!I)
3056return;
3057
3058Value *X, *Y;
3059if (Type ==MonotonicType::GreaterEq) {
3060if (match(I,m_Or(m_Value(X),m_Value(Y))) ||
3061match(I, m_Intrinsic<Intrinsic::uadd_sat>(m_Value(X),m_Value(Y)))) {
3062getUnsignedMonotonicValues(Res,X,Type,Depth);
3063getUnsignedMonotonicValues(Res,Y,Type,Depth);
3064 }
3065 }else {
3066assert(Type ==MonotonicType::LowerEq);
3067switch (I->getOpcode()) {
3068case Instruction::And:
3069getUnsignedMonotonicValues(Res,I->getOperand(0),Type,Depth);
3070getUnsignedMonotonicValues(Res,I->getOperand(1),Type,Depth);
3071break;
3072case Instruction::URem:
3073case Instruction::UDiv:
3074case Instruction::LShr:
3075getUnsignedMonotonicValues(Res,I->getOperand(0),Type,Depth);
3076break;
3077case Instruction::Call:
3078if (match(I, m_Intrinsic<Intrinsic::usub_sat>(m_Value(X))))
3079getUnsignedMonotonicValues(Res,X,Type,Depth);
3080break;
3081default:
3082break;
3083 }
3084 }
3085}
3086
3087staticValue *simplifyICmpUsingMonotonicValues(CmpPredicate Pred,Value *LHS,
3088Value *RHS) {
3089if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3090returnnullptr;
3091
3092// We have LHS uge GreaterValues and LowerValues uge RHS. If any value
3093// appears in both sets, it follows that LHS uge RHS.
3094SmallPtrSet<Value *, 4> GreaterValues;
3095SmallPtrSet<Value *, 4> LowerValues;
3096getUnsignedMonotonicValues(GreaterValues,LHS,MonotonicType::GreaterEq);
3097getUnsignedMonotonicValues(LowerValues,RHS,MonotonicType::LowerEq);
3098for (Value *GV : GreaterValues)
3099if (LowerValues.contains(GV))
3100returnConstantInt::getBool(getCompareTy(LHS),
3101 Pred == ICmpInst::ICMP_UGE);
3102returnnullptr;
3103}
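// For example, "icmp uge (or i8 %x, %y), (and i8 %x, %z)" folds to true:
// %x appears in both sets, because (or %x, %y) uge %x and %x uge (and %x, %z).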
3104
3105staticValue *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred,BinaryOperator *LBO,
3106Value *RHS,constSimplifyQuery &Q,
3107unsigned MaxRecurse) {
3108Type *ITy =getCompareTy(RHS);// The return type.
3109
3110Value *Y =nullptr;
3111// icmp pred (or X, Y), X
3112if (match(LBO,m_c_Or(m_Value(Y),m_Specific(RHS)))) {
3113if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3114KnownBits RHSKnown =computeKnownBits(RHS,/* Depth */ 0, Q);
3115KnownBits YKnown =computeKnownBits(Y,/* Depth */ 0, Q);
3116if (RHSKnown.isNonNegative() && YKnown.isNegative())
3117return Pred == ICmpInst::ICMP_SLT ?getTrue(ITy) :getFalse(ITy);
3118if (RHSKnown.isNegative() || YKnown.isNonNegative())
3119return Pred == ICmpInst::ICMP_SLT ?getFalse(ITy) :getTrue(ITy);
3120 }
3121 }
3122
3123// icmp pred (urem X, Y), Y
3124if (match(LBO,m_URem(m_Value(),m_Specific(RHS)))) {
3125switch (Pred) {
3126default:
3127break;
3128case ICmpInst::ICMP_SGT:
3129case ICmpInst::ICMP_SGE: {
3130KnownBits Known =computeKnownBits(RHS,/* Depth */ 0, Q);
3131if (!Known.isNonNegative())
3132break;
3133 [[fallthrough]];
3134 }
3135case ICmpInst::ICMP_EQ:
3136case ICmpInst::ICMP_UGT:
3137case ICmpInst::ICMP_UGE:
3138returngetFalse(ITy);
3139case ICmpInst::ICMP_SLT:
3140case ICmpInst::ICMP_SLE: {
3141KnownBits Known =computeKnownBits(RHS,/* Depth */ 0, Q);
3142if (!Known.isNonNegative())
3143break;
3144 [[fallthrough]];
3145 }
3146case ICmpInst::ICMP_NE:
3147case ICmpInst::ICMP_ULT:
3148case ICmpInst::ICMP_ULE:
3149returngetTrue(ITy);
3150 }
3151 }
3152
3153// If x is nonzero:
3154// x >>u C <u x --> true for C != 0.
3155// x >>u C != x --> true for C != 0.
3156// x >>u C >=u x --> false for C != 0.
3157// x >>u C == x --> false for C != 0.
3158// x udiv C <u x --> true for C != 1.
3159// x udiv C != x --> true for C != 1.
3160// x udiv C >=u x --> false for C != 1.
3161// x udiv C == x --> false for C != 1.
3162// TODO: allow non-constant shift amount/divisor
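  // For example, if %x is known non-zero, "icmp ult (lshr i8 %x, 1), %x"
  // folds to true: shifting a non-zero value right by a non-zero amount
  // strictly decreases it.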
3163constAPInt *C;
3164if ((match(LBO,m_LShr(m_Specific(RHS),m_APInt(C))) && *C != 0) ||
3165 (match(LBO,m_UDiv(m_Specific(RHS),m_APInt(C))) && *C != 1)) {
3166if (isKnownNonZero(RHS, Q)) {
3167switch (Pred) {
3168default:
3169break;
3170case ICmpInst::ICMP_EQ:
3171case ICmpInst::ICMP_UGE:
3172case ICmpInst::ICMP_UGT:
3173returngetFalse(ITy);
3174case ICmpInst::ICMP_NE:
3175case ICmpInst::ICMP_ULT:
3176case ICmpInst::ICMP_ULE:
3177returngetTrue(ITy);
3178 }
3179 }
3180 }
3181
3182// (x*C1)/C2 <= x for C1 <= C2.
3183// This holds even if the multiplication overflows: Assume that x != 0 and
3184// arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3185// thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3186//
3187// Additionally, the multiplication or the division might be represented
3188// as a shift:
3189// (x*C1)>>C2 <= x for C1 <= 2**C2.
3190// (x<<C1)/C2 <= x for 2**C1 <= C2.
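  // For example, "icmp ule (udiv (mul i8 %x, 3), 4), %x" folds to true
  // (C1 = 3 <= C2 = 4), even when the multiply wraps.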
3191constAPInt *C1, *C2;
3192if ((match(LBO,m_UDiv(m_Mul(m_Specific(RHS),m_APInt(C1)),m_APInt(C2))) &&
3193 C1->ule(*C2)) ||
3194 (match(LBO,m_LShr(m_Mul(m_Specific(RHS),m_APInt(C1)),m_APInt(C2))) &&
3195 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3196 (match(LBO,m_UDiv(m_Shl(m_Specific(RHS),m_APInt(C1)),m_APInt(C2))) &&
3197 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3198if (Pred == ICmpInst::ICMP_UGT)
3199returngetFalse(ITy);
3200if (Pred == ICmpInst::ICMP_ULE)
3201returngetTrue(ITy);
3202 }
3203
3204// (sub C, X) == X, C is odd --> false
3205// (sub C, X) != X, C is odd --> true
3206if (match(LBO,m_Sub(m_APIntAllowPoison(C),m_Specific(RHS))) &&
3207 (*C & 1) == 1 &&ICmpInst::isEquality(Pred))
3208return (Pred == ICmpInst::ICMP_EQ) ?getFalse(ITy) :getTrue(ITy);
3209
3210returnnullptr;
3211}
3212
3213// If only one of the icmp's operands has NSW flags, try to prove that:
3214//
3215// icmp slt (x + C1), (x +nsw C2)
3216//
3217// is equivalent to:
3218//
3219// icmp slt C1, C2
3220//
3221// which is true if x + C2 has the NSW flags set and:
3222// *) C1 < C2 && C1 >= 0, or
3223// *) C2 < C1 && C1 <= 0.
3224//
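// For example, "icmp slt (add i32 %x, 1), (add nsw i32 %x, 2)" simplifies to
// "icmp slt 1, 2", i.e. true, because C1 = 1 < C2 = 2 and C1 >= 0.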
3225staticbooltrySimplifyICmpWithAdds(CmpPredicate Pred,Value *LHS,Value *RHS,
3226constInstrInfoQuery &IIQ) {
3227// TODO: only support icmp slt for now.
3228if (Pred !=CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
3229returnfalse;
3230
3231// Canonicalize nsw add as RHS.
3232if (!match(RHS,m_NSWAdd(m_Value(),m_Value())))
3233std::swap(LHS,RHS);
3234if (!match(RHS,m_NSWAdd(m_Value(),m_Value())))
3235returnfalse;
3236
3237Value *X;
3238constAPInt *C1, *C2;
3239if (!match(LHS,m_Add(m_Value(X),m_APInt(C1))) ||
3240 !match(RHS,m_Add(m_Specific(X),m_APInt(C2))))
3241returnfalse;
3242
3243return (C1->slt(*C2) && C1->isNonNegative()) ||
3244 (C2->slt(*C1) && C1->isNonPositive());
3245}
3246
3247/// TODO: A large part of this logic is duplicated in InstCombine's
3248/// foldICmpBinOp(). We should be able to share that and avoid the code
3249/// duplication.
3250staticValue *simplifyICmpWithBinOp(CmpPredicate Pred,Value *LHS,Value *RHS,
3251constSimplifyQuery &Q,
3252unsigned MaxRecurse) {
3253BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3254BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3255if (MaxRecurse && (LBO || RBO)) {
3256// Analyze the case when either LHS or RHS is an add instruction.
3257Value *A =nullptr, *B =nullptr, *C =nullptr, *D =nullptr;
3258// LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3259bool NoLHSWrapProblem =false, NoRHSWrapProblem =false;
3260if (LBO && LBO->getOpcode() == Instruction::Add) {
3261A = LBO->getOperand(0);
3262B = LBO->getOperand(1);
3263 NoLHSWrapProblem =
3264ICmpInst::isEquality(Pred) ||
3265 (CmpInst::isUnsigned(Pred) &&
3266 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3267 (CmpInst::isSigned(Pred) &&
3268 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3269 }
3270if (RBO && RBO->getOpcode() == Instruction::Add) {
3271C = RBO->getOperand(0);
3272D = RBO->getOperand(1);
3273 NoRHSWrapProblem =
3274ICmpInst::isEquality(Pred) ||
3275 (CmpInst::isUnsigned(Pred) &&
3276 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3277 (CmpInst::isSigned(Pred) &&
3278 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3279 }
3280
3281// icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3282if ((A ==RHS ||B ==RHS) && NoLHSWrapProblem)
3283if (Value *V =simplifyICmpInst(Pred,A ==RHS ?B :A,
3284Constant::getNullValue(RHS->getType()), Q,
3285 MaxRecurse - 1))
3286return V;
3287
3288// icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3289if ((C ==LHS ||D ==LHS) && NoRHSWrapProblem)
3290if (Value *V =
3291simplifyICmpInst(Pred,Constant::getNullValue(LHS->getType()),
3292C ==LHS ?D :C, Q, MaxRecurse - 1))
3293return V;
3294
3295// icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3296bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3297trySimplifyICmpWithAdds(Pred,LHS,RHS, Q.IIQ);
3298if (A &&C && (A ==C ||A ==D ||B ==C ||B ==D) && CanSimplify) {
3299// Determine Y and Z in the form icmp (X+Y), (X+Z).
3300Value *Y, *Z;
3301if (A ==C) {
3302// C + B == C + D -> B == D
3303Y =B;
3304 Z =D;
3305 }elseif (A ==D) {
3306// D + B == C + D -> B == C
3307Y =B;
3308 Z =C;
3309 }elseif (B ==C) {
3310// A + C == C + D -> A == D
3311Y =A;
3312 Z =D;
3313 }else {
3314assert(B ==D);
3315// A + D == C + D -> A == C
3316Y =A;
3317 Z =C;
3318 }
3319if (Value *V =simplifyICmpInst(Pred,Y, Z, Q, MaxRecurse - 1))
3320return V;
3321 }
3322 }
3323
3324if (LBO)
3325if (Value *V =simplifyICmpWithBinOpOnLHS(Pred, LBO,RHS, Q, MaxRecurse))
3326return V;
3327
3328if (RBO)
3329if (Value *V =simplifyICmpWithBinOpOnLHS(
3330 ICmpInst::getSwappedPredicate(Pred), RBO,LHS, Q, MaxRecurse))
3331return V;
3332
3333// 0 - (zext X) pred C
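  // For example, "icmp sgt (sub i32 0, (zext i8 %x to i32)), 5" folds to
  // false: the negated zext is never positive, so it cannot exceed a
  // non-negative constant.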
3334if (!CmpInst::isUnsigned(Pred) &&match(LHS,m_Neg(m_ZExt(m_Value())))) {
3335constAPInt *C;
3336if (match(RHS,m_APInt(C))) {
3337if (C->isStrictlyPositive()) {
3338if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3339returnConstantInt::getTrue(getCompareTy(RHS));
3340if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3341returnConstantInt::getFalse(getCompareTy(RHS));
3342 }
3343if (C->isNonNegative()) {
3344if (Pred == ICmpInst::ICMP_SLE)
3345returnConstantInt::getTrue(getCompareTy(RHS));
3346if (Pred == ICmpInst::ICMP_SGT)
3347returnConstantInt::getFalse(getCompareTy(RHS));
3348 }
3349 }
3350 }
3351
3352// If C2 is a power-of-2 and C is not:
3353// (C2 << X) == C --> false
3354// (C2 << X) != C --> true
3355constAPInt *C;
3356if (match(LHS,m_Shl(m_Power2(),m_Value())) &&
3357match(RHS,m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
3358// C2 << X can equal zero in some circumstances.
3359// This simplification might be unsafe if C is zero.
3360//
3361// We know it is safe if:
3362// - The shift is nsw. We can't shift out the one bit.
3363// - The shift is nuw. We can't shift out the one bit.
3364// - C2 is one.
3365// - C isn't zero.
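  // For example, "icmp eq (shl i8 2, %x), 7" folds to false: shifting the
  // single set bit of 2 left yields a power of two or zero, never 7, and
  // C = 7 is non-zero so the fold is safe.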
3366if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3367 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3368match(LHS,m_Shl(m_One(),m_Value())) || !C->isZero()) {
3369if (Pred == ICmpInst::ICMP_EQ)
3370returnConstantInt::getFalse(getCompareTy(RHS));
3371if (Pred == ICmpInst::ICMP_NE)
3372returnConstantInt::getTrue(getCompareTy(RHS));
3373 }
3374 }
3375
3376// If C is a power-of-2:
3377// (C << X) >u 0x8000 --> false
3378// (C << X) <=u 0x8000 --> true
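  // (A power-of-2 shifted left is either zero or still a single set bit, and
  // the largest single-bit value is the sign mask, so the result is always
  // ule the sign mask.)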
3379if (match(LHS,m_Shl(m_Power2(),m_Value())) &&match(RHS,m_SignMask())) {
3380if (Pred == ICmpInst::ICMP_UGT)
3381returnConstantInt::getFalse(getCompareTy(RHS));
3382if (Pred == ICmpInst::ICMP_ULE)
3383returnConstantInt::getTrue(getCompareTy(RHS));
3384 }
3385
3386if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3387returnnullptr;
3388
3389if (LBO->getOperand(0) == RBO->getOperand(0)) {
3390switch (LBO->getOpcode()) {
3391default:
3392break;
3393case Instruction::Shl: {
3394bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3395bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3396if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3397 !isKnownNonZero(LBO->getOperand(0), Q))
3398break;
3399if (Value *V =simplifyICmpInst(Pred, LBO->getOperand(1),
3400 RBO->getOperand(1), Q, MaxRecurse - 1))
3401return V;
3402break;
3403 }
3404// If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3405// icmp ule A, B -> true
3406// icmp ugt A, B -> false
3407// icmp sle A, B -> true (C1 and C2 are the same sign)
3408// icmp sgt A, B -> false (C1 and C2 are the same sign)
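    // For example, "icmp ule (and i8 %x, 15), (and i8 %x, 63)" folds to
    // true: 15 & 63 == 15, and masking with a superset of bits can only
    // produce a value that is uge the more heavily masked one.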
3409case Instruction::And:
3410case Instruction::Or: {
3411constAPInt *C1, *C2;
3412if (ICmpInst::isRelational(Pred) &&
3413match(LBO->getOperand(1),m_APInt(C1)) &&
3414match(RBO->getOperand(1),m_APInt(C2))) {
3415if (!C1->isSubsetOf(*C2)) {
3416std::swap(C1, C2);
3417 Pred = ICmpInst::getSwappedPredicate(Pred);
3418 }
3419if (C1->isSubsetOf(*C2)) {
3420if (Pred == ICmpInst::ICMP_ULE)
3421returnConstantInt::getTrue(getCompareTy(LHS));
3422if (Pred == ICmpInst::ICMP_UGT)
3423returnConstantInt::getFalse(getCompareTy(LHS));
3424if (C1->isNonNegative() == C2->isNonNegative()) {
3425if (Pred == ICmpInst::ICMP_SLE)
3426returnConstantInt::getTrue(getCompareTy(LHS));
3427if (Pred == ICmpInst::ICMP_SGT)
3428returnConstantInt::getFalse(getCompareTy(LHS));
3429 }
3430 }
3431 }
3432break;
3433 }
3434 }
3435 }
3436
3437if (LBO->getOperand(1) == RBO->getOperand(1)) {
3438switch (LBO->getOpcode()) {
3439default:
3440break;
3441case Instruction::UDiv:
3442case Instruction::LShr:
3443if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3444 !Q.IIQ.isExact(RBO))
3445break;
3446if (Value *V =simplifyICmpInst(Pred, LBO->getOperand(0),
3447 RBO->getOperand(0), Q, MaxRecurse - 1))
3448return V;
3449break;
3450case Instruction::SDiv:
3451if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3452 !Q.IIQ.isExact(RBO))
3453break;
3454if (Value *V =simplifyICmpInst(Pred, LBO->getOperand(0),
3455 RBO->getOperand(0), Q, MaxRecurse - 1))
3456return V;
3457break;
3458case Instruction::AShr:
3459if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3460break;
3461if (Value *V =simplifyICmpInst(Pred, LBO->getOperand(0),
3462 RBO->getOperand(0), Q, MaxRecurse - 1))
3463return V;
3464break;
3465case Instruction::Shl: {
3466bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3467bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3468if (!NUW && !NSW)
3469break;
3470if (!NSW && ICmpInst::isSigned(Pred))
3471break;
3472if (Value *V =simplifyICmpInst(Pred, LBO->getOperand(0),
3473 RBO->getOperand(0), Q, MaxRecurse - 1))
3474return V;
3475break;
3476 }
3477 }
3478 }
3479returnnullptr;
3480}
3481
3482/// Simplify integer comparisons where at least one operand of the compare
3483/// matches an integer min/max idiom.
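/// For example, "icmp sge (smax i32 %a, %b), %a" folds to true and
/// "icmp slt (smax i32 %a, %b), %a" folds to false.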
3484staticValue *simplifyICmpWithMinMax(CmpPredicate Pred,Value *LHS,Value *RHS,
3485constSimplifyQuery &Q,
3486unsigned MaxRecurse) {
3487Type *ITy =getCompareTy(LHS);// The return type.
3488Value *A, *B;
3489CmpInst::PredicateP =CmpInst::BAD_ICMP_PREDICATE;
3490CmpInst::Predicate EqP;// Chosen so that "A == max/min(A,B)" iff "A EqP B".
3491
3492// Signed variants on "max(a,b)>=a -> true".
3493if (match(LHS,m_SMax(m_Value(A),m_Value(B))) && (A ==RHS ||B ==RHS)) {
3494if (A !=RHS)
3495std::swap(A,B);// smax(A, B) pred A.
3496 EqP =CmpInst::ICMP_SGE;// "A == smax(A, B)" iff "A sge B".
3497// We analyze this as smax(A, B) pred A.
3498P = Pred;
3499 }elseif (match(RHS,m_SMax(m_Value(A),m_Value(B))) &&
3500 (A ==LHS ||B ==LHS)) {
3501if (A !=LHS)
3502std::swap(A,B);// A pred smax(A, B).
3503 EqP =CmpInst::ICMP_SGE;// "A == smax(A, B)" iff "A sge B".
3504// We analyze this as smax(A, B) swapped-pred A.
3505P =CmpInst::getSwappedPredicate(Pred);
3506 }elseif (match(LHS,m_SMin(m_Value(A),m_Value(B))) &&
3507 (A ==RHS ||B ==RHS)) {
3508if (A !=RHS)
3509std::swap(A,B);// smin(A, B) pred A.
3510 EqP =CmpInst::ICMP_SLE;// "A == smin(A, B)" iff "A sle B".
3511// We analyze this as smax(-A, -B) swapped-pred -A.
3512// Note that we do not need to actually form -A or -B thanks to EqP.
3513P =CmpInst::getSwappedPredicate(Pred);
3514 }elseif (match(RHS,m_SMin(m_Value(A),m_Value(B))) &&
3515 (A ==LHS ||B ==LHS)) {
3516if (A !=LHS)
3517std::swap(A,B);// A pred smin(A, B).
3518 EqP =CmpInst::ICMP_SLE;// "A == smin(A, B)" iff "A sle B".
3519// We analyze this as smax(-A, -B) pred -A.
3520// Note that we do not need to actually form -A or -B thanks to EqP.
3521P = Pred;
3522 }
3523if (P !=CmpInst::BAD_ICMP_PREDICATE) {
3524// Cases correspond to "max(A, B) p A".
3525switch (P) {
3526default:
3527break;
3528caseCmpInst::ICMP_EQ:
3529caseCmpInst::ICMP_SLE:
3530// Equivalent to "A EqP B". This may be the same as the condition tested
3531// in the max/min; if so, we can just return that.
3532if (Value *V =extractEquivalentCondition(LHS, EqP,A,B))
3533return V;
3534if (Value *V =extractEquivalentCondition(RHS, EqP,A,B))
3535return V;
3536// Otherwise, see if "A EqP B" simplifies.
3537if (MaxRecurse)
3538if (Value *V =simplifyICmpInst(EqP,A,B, Q, MaxRecurse - 1))
3539return V;
3540break;
3541caseCmpInst::ICMP_NE:
3542caseCmpInst::ICMP_SGT: {
3543CmpInst::Predicate InvEqP =CmpInst::getInversePredicate(EqP);
3544// Equivalent to "A InvEqP B". This may be the same as the condition
3545// tested in the max/min; if so, we can just return that.
3546if (Value *V =extractEquivalentCondition(LHS, InvEqP,A,B))
3547return V;
3548if (Value *V =extractEquivalentCondition(RHS, InvEqP,A,B))
3549return V;
3550// Otherwise, see if "A InvEqP B" simplifies.
3551if (MaxRecurse)
3552if (Value *V =simplifyICmpInst(InvEqP,A,B, Q, MaxRecurse - 1))
3553return V;
3554break;
3555 }
3556caseCmpInst::ICMP_SGE:
3557// Always true.
3558returngetTrue(ITy);
3559caseCmpInst::ICMP_SLT:
3560// Always false.
3561returngetFalse(ITy);
3562 }
3563 }
3564
3565// Unsigned variants on "max(a,b)>=a -> true".
3566P =CmpInst::BAD_ICMP_PREDICATE;
3567if (match(LHS,m_UMax(m_Value(A),m_Value(B))) && (A ==RHS ||B ==RHS)) {
3568if (A !=RHS)
3569std::swap(A,B);// umax(A, B) pred A.
3570 EqP =CmpInst::ICMP_UGE;// "A == umax(A, B)" iff "A uge B".
3571// We analyze this as umax(A, B) pred A.
3572P = Pred;
3573 }elseif (match(RHS,m_UMax(m_Value(A),m_Value(B))) &&
3574 (A ==LHS ||B ==LHS)) {
3575if (A !=LHS)
3576std::swap(A,B);// A pred umax(A, B).
3577 EqP =CmpInst::ICMP_UGE;// "A == umax(A, B)" iff "A uge B".
3578// We analyze this as umax(A, B) swapped-pred A.
3579P =CmpInst::getSwappedPredicate(Pred);
3580 }elseif (match(LHS,m_UMin(m_Value(A),m_Value(B))) &&
3581 (A ==RHS ||B ==RHS)) {
3582if (A !=RHS)
3583std::swap(A,B);// umin(A, B) pred A.
3584 EqP =CmpInst::ICMP_ULE;// "A == umin(A, B)" iff "A ule B".
3585// We analyze this as umax(-A, -B) swapped-pred -A.
3586// Note that we do not need to actually form -A or -B thanks to EqP.
3587P =CmpInst::getSwappedPredicate(Pred);
3588 }elseif (match(RHS,m_UMin(m_Value(A),m_Value(B))) &&
3589 (A ==LHS ||B ==LHS)) {
3590if (A !=LHS)
3591std::swap(A,B);// A pred umin(A, B).
3592 EqP =CmpInst::ICMP_ULE;// "A == umin(A, B)" iff "A ule B".
3593// We analyze this as umax(-A, -B) pred -A.
3594// Note that we do not need to actually form -A or -B thanks to EqP.
3595P = Pred;
3596 }
3597if (P !=CmpInst::BAD_ICMP_PREDICATE) {
3598// Cases correspond to "max(A, B) p A".
3599switch (P) {
3600default:
3601break;
3602caseCmpInst::ICMP_EQ:
3603caseCmpInst::ICMP_ULE:
3604// Equivalent to "A EqP B". This may be the same as the condition tested
3605// in the max/min; if so, we can just return that.
3606if (Value *V =extractEquivalentCondition(LHS, EqP,A,B))
3607return V;
3608if (Value *V =extractEquivalentCondition(RHS, EqP,A,B))
3609return V;
3610// Otherwise, see if "A EqP B" simplifies.
3611if (MaxRecurse)
3612if (Value *V =simplifyICmpInst(EqP,A,B, Q, MaxRecurse - 1))
3613return V;
3614break;
3615caseCmpInst::ICMP_NE:
3616caseCmpInst::ICMP_UGT: {
3617CmpInst::Predicate InvEqP =CmpInst::getInversePredicate(EqP);
3618// Equivalent to "A InvEqP B". This may be the same as the condition
3619// tested in the max/min; if so, we can just return that.
3620if (Value *V =extractEquivalentCondition(LHS, InvEqP,A,B))
3621return V;
3622if (Value *V =extractEquivalentCondition(RHS, InvEqP,A,B))
3623return V;
3624// Otherwise, see if "A InvEqP B" simplifies.
3625if (MaxRecurse)
3626if (Value *V =simplifyICmpInst(InvEqP,A,B, Q, MaxRecurse - 1))
3627return V;
3628break;
3629 }
3630caseCmpInst::ICMP_UGE:
3631returngetTrue(ITy);
3632caseCmpInst::ICMP_ULT:
3633returngetFalse(ITy);
3634 }
3635 }
3636
3637// Comparing one min and one max that share a common operand?
3638// Canonicalize min operand to RHS.
3639if (match(LHS,m_UMin(m_Value(),m_Value())) ||
3640match(LHS,m_SMin(m_Value(),m_Value()))) {
3641std::swap(LHS,RHS);
3642 Pred = ICmpInst::getSwappedPredicate(Pred);
3643 }
3644
3645Value *C, *D;
3646if (match(LHS,m_SMax(m_Value(A),m_Value(B))) &&
3647match(RHS,m_SMin(m_Value(C),m_Value(D))) &&
3648 (A ==C ||A ==D ||B ==C ||B ==D)) {
3649// smax(A, B) >=s smin(A, D) --> true
3650if (Pred ==CmpInst::ICMP_SGE)
3651returngetTrue(ITy);
3652// smax(A, B) <s smin(A, D) --> false
3653if (Pred ==CmpInst::ICMP_SLT)
3654returngetFalse(ITy);
3655 }elseif (match(LHS,m_UMax(m_Value(A),m_Value(B))) &&
3656match(RHS,m_UMin(m_Value(C),m_Value(D))) &&
3657 (A ==C ||A ==D ||B ==C ||B ==D)) {
3658// umax(A, B) >=u umin(A, D) --> true
3659if (Pred ==CmpInst::ICMP_UGE)
3660returngetTrue(ITy);
3661// umax(A, B) <u umin(A, D) --> false
3662if (Pred ==CmpInst::ICMP_ULT)
3663returngetFalse(ITy);
3664 }
3665
3666returnnullptr;
3667}
3668
3669staticValue *simplifyICmpWithDominatingAssume(CmpPredicatePredicate,
3670Value *LHS,Value *RHS,
3671constSimplifyQuery &Q) {
3672// Gracefully handle instructions that have not been inserted yet.
3673if (!Q.AC || !Q.CxtI)
3674returnnullptr;
3675
3676for (Value *AssumeBaseOp : {LHS,RHS}) {
3677for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3678if (!AssumeVH)
3679continue;
3680
3681CallInst *Assume = cast<CallInst>(AssumeVH);
3682if (std::optional<bool> Imp =isImpliedCondition(
3683 Assume->getArgOperand(0),Predicate,LHS,RHS, Q.DL))
3684if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3685return ConstantInt::get(getCompareTy(LHS), *Imp);
3686 }
3687 }
3688
3689returnnullptr;
3690}
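// For example, if "call void @llvm.assume(i1 %c)" with "%c = icmp ult i32 %x,
// %y" is valid at (e.g. dominates) the context instruction, a later
// "icmp ult i32 %x, %y" on the same operands folds to true.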
3691
3692staticValue *simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred,Value *LHS,
3693Value *RHS) {
3694auto *II = dyn_cast<IntrinsicInst>(LHS);
3695if (!II)
3696returnnullptr;
3697
3698switch (II->getIntrinsicID()) {
3699case Intrinsic::uadd_sat:
3700// uadd.sat(X, Y) uge X + Y
3701if (match(RHS,m_c_Add(m_Specific(II->getArgOperand(0)),
3702m_Specific(II->getArgOperand(1))))) {
3703if (Pred == ICmpInst::ICMP_UGE)
3704returnConstantInt::getTrue(getCompareTy(II));
3705if (Pred == ICmpInst::ICMP_ULT)
3706returnConstantInt::getFalse(getCompareTy(II));
3707 }
3708returnnullptr;
3709case Intrinsic::usub_sat:
3710// usub.sat(X, Y) ule X - Y
3711if (match(RHS,m_Sub(m_Specific(II->getArgOperand(0)),
3712m_Specific(II->getArgOperand(1))))) {
3713if (Pred == ICmpInst::ICMP_ULE)
3714returnConstantInt::getTrue(getCompareTy(II));
3715if (Pred == ICmpInst::ICMP_UGT)
3716returnConstantInt::getFalse(getCompareTy(II));
3717 }
3718returnnullptr;
3719default:
3720returnnullptr;
3721 }
3722}
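// (Rationale: on overflow uadd.sat clamps to the unsigned maximum, so it is
// always uge the wrapping add; likewise usub.sat clamps to zero, so it is
// always ule the wrapping sub.)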
3723
3724/// Helper method to get range from metadata or attribute.
3725static std::optional<ConstantRange>getRange(Value *V,
3726constInstrInfoQuery &IIQ) {
3727if (Instruction *I = dyn_cast<Instruction>(V))
3728if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
3729returngetConstantRangeFromMetadata(*MD);
3730
3731if (constArgument *A = dyn_cast<Argument>(V))
3732returnA->getRange();
3733elseif (constCallBase *CB = dyn_cast<CallBase>(V))
3734return CB->getRange();
3735
3736return std::nullopt;
3737}
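// For example, a load of i8 annotated with !range metadata !{i8 0, i8 10} is
// known to produce a value in the half-open range [0, 10); range attributes
// on arguments and call results are queried the same way.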
3738
3739/// Given operands for an ICmpInst, see if we can fold the result.
3740/// If not, this returns null.
3741staticValue *simplifyICmpInst(CmpPredicate Pred,Value *LHS,Value *RHS,
3742constSimplifyQuery &Q,unsigned MaxRecurse) {
3743assert(CmpInst::isIntPredicate(Pred) &&"Not an integer compare!");
3744
3745if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3746if (Constant *CRHS = dyn_cast<Constant>(RHS))
3747returnConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3748
3749// If we have a constant, make sure it is on the RHS.
3750std::swap(LHS,RHS);
3751 Pred =CmpInst::getSwappedPredicate(Pred);
3752 }
3753assert(!isa<UndefValue>(LHS) &&"Unexpected icmp undef,%X");
3754
3755Type *ITy =getCompareTy(LHS);// The return type.
3756
3757// icmp poison, X -> poison
3758if (isa<PoisonValue>(RHS))
3759returnPoisonValue::get(ITy);
3760
3761// For EQ and NE, we can always pick a value for the undef to make the
3762// predicate pass or fail, so we can return undef.
3763// Matches behavior in llvm::ConstantFoldCompareInstruction.
3764if (Q.isUndefValue(RHS) &&ICmpInst::isEquality(Pred))
3765returnUndefValue::get(ITy);
3766
3767// icmp X, X -> true/false
3768// icmp X, undef -> true/false because undef could be X.
3769if (LHS ==RHS || Q.isUndefValue(RHS))
3770return ConstantInt::get(ITy,CmpInst::isTrueWhenEqual(Pred));
3771
3772if (Value *V =simplifyICmpOfBools(Pred,LHS,RHS, Q))
3773return V;
3774
3775// TODO: Sink/common this with other potentially expensive calls that use
3776// ValueTracking? See comment below for isKnownNonEqual().
3777if (Value *V =simplifyICmpWithZero(Pred,LHS,RHS, Q))
3778return V;
3779
3780if (Value *V =simplifyICmpWithConstant(Pred,LHS,RHS, Q.IIQ))
3781return V;
3782
3783// If both operands have range metadata, use the metadata
3784// to simplify the comparison.
3785if (std::optional<ConstantRange> RhsCr =getRange(RHS, Q.IIQ))
3786if (std::optional<ConstantRange> LhsCr =getRange(LHS, Q.IIQ)) {
3787if (LhsCr->icmp(Pred, *RhsCr))
3788returnConstantInt::getTrue(ITy);
3789
3790if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
3791returnConstantInt::getFalse(ITy);
3792 }
3793
3794// Compare of cast, for example (zext X) != 0 -> X != 0
3795if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3796Instruction *LI = cast<CastInst>(LHS);
3797Value *SrcOp = LI->getOperand(0);
3798Type *SrcTy =SrcOp->getType();
3799Type *DstTy = LI->getType();
3800
3801// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3802// if the integer type is the same size as the pointer type.
3803if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3804 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3805if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3806// Transfer the cast to the constant.
3807if (Value *V =simplifyICmpInst(Pred,SrcOp,
3808ConstantExpr::getIntToPtr(RHSC, SrcTy),
3809 Q, MaxRecurse - 1))
3810return V;
3811 }elseif (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3812if (RI->getOperand(0)->getType() == SrcTy)
3813// Compare without the cast.
3814if (Value *V =simplifyICmpInst(Pred,SrcOp, RI->getOperand(0), Q,
3815 MaxRecurse - 1))
3816return V;
3817 }
3818 }
3819
3820if (isa<ZExtInst>(LHS)) {
3821// Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3822// same type.
3823if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3824if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3825// Compare X and Y. Note that signed predicates become unsigned.
3826if (Value *V =
3827simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),SrcOp,
3828 RI->getOperand(0), Q, MaxRecurse - 1))
3829return V;
3830 }
3831// Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3832elseif (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3833if (SrcOp == RI->getOperand(0)) {
3834if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3835returnConstantInt::getTrue(ITy);
3836if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3837returnConstantInt::getFalse(ITy);
3838 }
3839 }
3840// Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3841// too. If not, then try to deduce the result of the comparison.
3842elseif (match(RHS,m_ImmConstant())) {
3843Constant *C = dyn_cast<Constant>(RHS);
3844assert(C !=nullptr);
3845
3846// Compute the constant that would happen if we truncated to SrcTy then
3847// reextended to DstTy.
3848Constant *Trunc =
3849ConstantFoldCastOperand(Instruction::Trunc,C, SrcTy, Q.DL);
3850assert(Trunc &&"Constant-fold of ImmConstant should not fail");
3851Constant *RExt =
3852ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3853assert(RExt &&"Constant-fold of ImmConstant should not fail");
3854Constant *AnyEq =
3855ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt,C, Q.DL);
3856assert(AnyEq &&"Constant-fold of ImmConstant should not fail");
3857
3858// If the re-extended constant didn't change any of the elements then
3859// this is effectively also a case of comparing two zero-extended
3860// values.
3861if (AnyEq->isAllOnesValue() && MaxRecurse)
3862if (Value *V =simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3863SrcOp, Trunc, Q, MaxRecurse - 1))
3864return V;
3865
3866// Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3867// there. Use this to work out the result of the comparison.
3868if (AnyEq->isNullValue()) {
3869switch (Pred) {
3870default:
3871llvm_unreachable("Unknown ICmp predicate!");
3872// LHS <u RHS.
3873case ICmpInst::ICMP_EQ:
3874case ICmpInst::ICMP_UGT:
3875case ICmpInst::ICMP_UGE:
3876returnConstant::getNullValue(ITy);
3877
3878case ICmpInst::ICMP_NE:
3879case ICmpInst::ICMP_ULT:
3880case ICmpInst::ICMP_ULE:
3881returnConstant::getAllOnesValue(ITy);
3882
3883// LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS
3884// is non-negative then LHS <s RHS.
3885case ICmpInst::ICMP_SGT:
3886case ICmpInst::ICMP_SGE:
3887returnConstantFoldCompareInstOperands(
3888 ICmpInst::ICMP_SLT,C,Constant::getNullValue(C->getType()),
3889 Q.DL);
3890case ICmpInst::ICMP_SLT:
3891case ICmpInst::ICMP_SLE:
3892returnConstantFoldCompareInstOperands(
3893 ICmpInst::ICMP_SGE,C,Constant::getNullValue(C->getType()),
3894 Q.DL);
3895 }
3896 }
3897 }
3898 }
3899
3900if (isa<SExtInst>(LHS)) {
3901// Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3902// same type.
3903if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3904if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3905// Compare X and Y. Note that the predicate does not change.
3906if (Value *V =simplifyICmpInst(Pred,SrcOp, RI->getOperand(0), Q,
3907 MaxRecurse - 1))
3908return V;
3909 }
3910// Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3911elseif (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3912if (SrcOp == RI->getOperand(0)) {
3913if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3914returnConstantInt::getTrue(ITy);
3915if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3916returnConstantInt::getFalse(ITy);
3917 }
3918 }
3919// Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3920// too. If not, then try to deduce the result of the comparison.
3921elseif (match(RHS,m_ImmConstant())) {
3922Constant *C = cast<Constant>(RHS);
3923
3924// Compute the constant that would happen if we truncated to SrcTy then
3925// reextended to DstTy.
3926Constant *Trunc =
3927ConstantFoldCastOperand(Instruction::Trunc,C, SrcTy, Q.DL);
3928assert(Trunc &&"Constant-fold of ImmConstant should not fail");
3929Constant *RExt =
3930ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3931assert(RExt &&"Constant-fold of ImmConstant should not fail");
3932Constant *AnyEq =
3933ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt,C, Q.DL);
3934assert(AnyEq &&"Constant-fold of ImmConstant should not fail");
3935
3936// If the re-extended constant didn't change then this is effectively
3937// also a case of comparing two sign-extended values.
3938if (AnyEq->isAllOnesValue() && MaxRecurse)
3939if (Value *V =
3940simplifyICmpInst(Pred,SrcOp, Trunc, Q, MaxRecurse - 1))
3941return V;
3942
3943// Otherwise the upper bits of LHS are all equal, while RHS has varying
3944// bits there. Use this to work out the result of the comparison.
3945if (AnyEq->isNullValue()) {
3946switch (Pred) {
3947default:
3948llvm_unreachable("Unknown ICmp predicate!");
3949case ICmpInst::ICMP_EQ:
3950returnConstant::getNullValue(ITy);
3951case ICmpInst::ICMP_NE:
3952returnConstant::getAllOnesValue(ITy);
3953
3954// If RHS is non-negative then LHS <s RHS. If RHS is negative then
3955// LHS >s RHS.
3956case ICmpInst::ICMP_SGT:
3957case ICmpInst::ICMP_SGE:
3958returnConstantFoldCompareInstOperands(
3959 ICmpInst::ICMP_SLT,C,Constant::getNullValue(C->getType()),
3960 Q.DL);
3961case ICmpInst::ICMP_SLT:
3962case ICmpInst::ICMP_SLE:
3963returnConstantFoldCompareInstOperands(
3964 ICmpInst::ICMP_SGE,C,Constant::getNullValue(C->getType()),
3965 Q.DL);
3966
3967// If LHS is non-negative then LHS <u RHS. If LHS is negative then
3968// LHS >u RHS.
3969case ICmpInst::ICMP_UGT:
3970case ICmpInst::ICMP_UGE:
3971// Comparison is true iff the LHS <s 0.
3972if (MaxRecurse)
3973if (Value *V =simplifyICmpInst(ICmpInst::ICMP_SLT,SrcOp,
3974Constant::getNullValue(SrcTy), Q,
3975 MaxRecurse - 1))
3976return V;
3977break;
3978case ICmpInst::ICMP_ULT:
3979case ICmpInst::ICMP_ULE:
3980// Comparison is true iff the LHS >=s 0.
3981if (MaxRecurse)
3982if (Value *V =simplifyICmpInst(ICmpInst::ICMP_SGE,SrcOp,
3983Constant::getNullValue(SrcTy), Q,
3984 MaxRecurse - 1))
3985return V;
3986break;
3987 }
3988 }
3989 }
3990 }
3991 }
3992
3993// icmp eq|ne X, Y -> false|true if X != Y
3994// This is potentially expensive, and we have already computed known bits for
3995// compares with 0 above here, so only try this for a non-zero compare.
3996if (ICmpInst::isEquality(Pred) && !match(RHS,m_Zero()) &&
3997isKnownNonEqual(LHS,RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3998return Pred == ICmpInst::ICMP_NE ?getTrue(ITy) :getFalse(ITy);
3999 }
4000
4001if (Value *V =simplifyICmpWithBinOp(Pred,LHS,RHS, Q, MaxRecurse))
4002return V;
4003
4004if (Value *V =simplifyICmpWithMinMax(Pred,LHS,RHS, Q, MaxRecurse))
4005return V;
4006
4007if (Value *V =simplifyICmpWithIntrinsicOnLHS(Pred,LHS,RHS))
4008return V;
4009if (Value *V =simplifyICmpWithIntrinsicOnLHS(
4010 ICmpInst::getSwappedPredicate(Pred),RHS,LHS))
4011return V;
4012
4013if (Value *V =simplifyICmpUsingMonotonicValues(Pred,LHS,RHS))
4014return V;
4015if (Value *V =simplifyICmpUsingMonotonicValues(
4016 ICmpInst::getSwappedPredicate(Pred),RHS,LHS))
4017return V;
4018
4019if (Value *V =simplifyICmpWithDominatingAssume(Pred,LHS,RHS, Q))
4020return V;
4021
4022if (std::optional<bool> Res =
4023isImpliedByDomCondition(Pred,LHS,RHS, Q.CxtI, Q.DL))
4024returnConstantInt::getBool(ITy, *Res);
4025
4026// Simplify comparisons of related pointers using a powerful, recursive
4027// GEP-walk when we have target data available.
4028if (LHS->getType()->isPointerTy())
4029if (auto *C =computePointerICmp(Pred,LHS,RHS, Q))
4030returnC;
4031if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4032if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4033if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4034 Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4035 Q.DL.getTypeSizeInBits(CLHS->getType()))
4036if (auto *C =computePointerICmp(Pred, CLHS->getPointerOperand(),
4037 CRHS->getPointerOperand(), Q))
4038returnC;
4039
4040// If the comparison is with the result of a select instruction, check whether
4041// comparing with either branch of the select always yields the same value.
4042if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4043if (Value *V =threadCmpOverSelect(Pred,LHS,RHS, Q, MaxRecurse))
4044return V;
4045
4046// If the comparison is with the result of a phi instruction, check whether
4047// doing the compare with each incoming phi value yields a common result.
4048if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4049if (Value *V =threadCmpOverPHI(Pred,LHS,RHS, Q, MaxRecurse))
4050return V;
4051
4052returnnullptr;
4053}
4054
4055Value *llvm::simplifyICmpInst(CmpPredicate Predicate,Value *LHS,Value *RHS,
4056constSimplifyQuery &Q) {
4057 return ::simplifyICmpInst(Predicate,LHS,RHS, Q,RecursionLimit);
4058}
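// Example use (hypothetical caller): a transform that already holds a
// SimplifyQuery Q can ask for a fold before materializing a new compare:
//   if (Value *V = simplifyICmpInst(ICmpInst::ICMP_ULT, X, Y, Q))
//     return V; // reuse the folded value instead of creating an icmp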
4059
4060/// Given operands for an FCmpInst, see if we can fold the result.
4061/// If not, this returns null.
4062staticValue *simplifyFCmpInst(CmpPredicate Pred,Value *LHS,Value *RHS,
4063FastMathFlags FMF,constSimplifyQuery &Q,
4064unsigned MaxRecurse) {
4065assert(CmpInst::isFPPredicate(Pred) &&"Not an FP compare!");
4066
4067if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4068if (Constant *CRHS = dyn_cast<Constant>(RHS))
4069returnConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4070 Q.CxtI);
4071
4072// If we have a constant, make sure it is on the RHS.
4073std::swap(LHS,RHS);
4074 Pred =CmpInst::getSwappedPredicate(Pred);
4075 }
4076
4077// Fold trivial predicates.
4078Type *RetTy =getCompareTy(LHS);
4079if (Pred == FCmpInst::FCMP_FALSE)
4080returngetFalse(RetTy);
4081if (Pred == FCmpInst::FCMP_TRUE)
4082returngetTrue(RetTy);
4083
4084// fcmp pred x, poison and fcmp pred poison, x
4085// fold to poison
4086if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4087returnPoisonValue::get(RetTy);
4088
4089// fcmp pred x, undef and fcmp pred undef, x
4090// fold to true if unordered, false if ordered
4091if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4092// Choosing NaN for the undef will always make unordered comparison succeed
4093// and ordered comparison fail.
4094return ConstantInt::get(RetTy,CmpInst::isUnordered(Pred));
4095 }
4096
4097// fcmp x,x -> true/false. Not all compares are foldable.
4098if (LHS ==RHS) {
4099if (CmpInst::isTrueWhenEqual(Pred))
4100returngetTrue(RetTy);
4101if (CmpInst::isFalseWhenEqual(Pred))
4102returngetFalse(RetTy);
4103 }
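  // For example, "fcmp oeq float %x, %x" is not folded here because it would
  // be false if %x is NaN; only predicates that are unambiguous for equal
  // operands (e.g. ueq/uge/ule -> true, one/ogt/olt -> false) are folded.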
4104
4105// Fold (un)ordered comparison if we can determine there are no NaNs.
4106//
4107// This catches the two-variable input case; constants are handled below as a
4108// class-like compare.
4109if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4110KnownFPClass RHSClass =
4111computeKnownFPClass(RHS,fcAllFlags,/*Depth=*/0, Q);
4112KnownFPClass LHSClass =
4113computeKnownFPClass(LHS,fcAllFlags,/*Depth=*/0, Q);
4114
4115if (FMF.noNaNs() ||
4116 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4117return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4118
4119if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4120return ConstantInt::get(RetTy, Pred ==CmpInst::FCMP_UNO);
4121 }
4122
4123constAPFloat *C =nullptr;
4124match(RHS,m_APFloatAllowPoison(C));
4125 std::optional<KnownFPClass> FullKnownClassLHS;
4126
4127// Lazily compute the possible classes for LHS. Avoid computing it twice if
4128// RHS is a 0.
4129auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4130fcAllFlags) {
4131if (FullKnownClassLHS)
4132return *FullKnownClassLHS;
4133returncomputeKnownFPClass(LHS, FMF, InterestedFlags, 0, Q);
4134 };
4135
4136if (C && Q.CxtI) {
4137// Fold out compares that express a class test.
4138//
4139// FIXME: Should be able to perform folds without context
4140// instruction. Always pass in the context function?
4141
4142constFunction *ParentF = Q.CxtI->getFunction();
4143auto [ClassVal, ClassTest] =fcmpToClassTest(Pred, *ParentF,LHS,C);
4144if (ClassVal) {
4145 FullKnownClassLHS = computeLHSClass();
4146if ((FullKnownClassLHS->KnownFPClasses & ClassTest) ==fcNone)
4147returngetFalse(RetTy);
4148if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) ==fcNone)
4149returngetTrue(RetTy);
4150 }
4151 }
4152
4153// Handle fcmp with constant RHS.
4154if (C) {
4155// TODO: If we always required a context function, we wouldn't need to
4156// special case nans.
4157if (C->isNaN())
4158return ConstantInt::get(RetTy,CmpInst::isUnordered(Pred));
4159
4160// TODO: Need a version of fcmpToClassTest that returns the implied class when
4161// the compare isn't a complete class test. E.g. > 1.0 implies fcPositive, but
4162// isn't implementable as a class call.
4163if (C->isNegative() && !C->isNegZero()) {
4164FPClassTest Interested =KnownFPClass::OrderedLessThanZeroMask;
4165
4166// TODO: We can catch more cases by using a range check rather than
4167// relying on CannotBeOrderedLessThanZero.
4168switch (Pred) {
4169case FCmpInst::FCMP_UGE:
4170case FCmpInst::FCMP_UGT:
4171case FCmpInst::FCMP_UNE: {
4172KnownFPClass KnownClass = computeLHSClass(Interested);
4173
4174// (X >= 0) implies (X > C) when (C < 0)
4175if (KnownClass.cannotBeOrderedLessThanZero())
4176returngetTrue(RetTy);
4177break;
4178 }
4179case FCmpInst::FCMP_OEQ:
4180case FCmpInst::FCMP_OLE:
4181case FCmpInst::FCMP_OLT: {
4182KnownFPClass KnownClass = computeLHSClass(Interested);
4183
4184// (X >= 0) implies !(X < C) when (C < 0)
4185if (KnownClass.cannotBeOrderedLessThanZero())
4186returngetFalse(RetTy);
4187break;
4188 }
4189default:
4190break;
4191 }
4192 }
4193// Check comparison of [minnum/maxnum with constant] with other constant.
4194constAPFloat *C2;
4195if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(),m_APFloat(C2))) &&
4196 *C2 < *C) ||
4197 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(),m_APFloat(C2))) &&
4198 *C2 > *C)) {
4199bool IsMaxNum =
4200 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4201// The ordered relationship and minnum/maxnum guarantee that we do not
4202// have NaN constants, so ordered/unordered preds are handled the same.
4203switch (Pred) {
4204case FCmpInst::FCMP_OEQ:
4205case FCmpInst::FCMP_UEQ:
4206// minnum(X, LesserC) == C --> false
4207// maxnum(X, GreaterC) == C --> false
4208returngetFalse(RetTy);
4209case FCmpInst::FCMP_ONE:
4210case FCmpInst::FCMP_UNE:
4211// minnum(X, LesserC) != C --> true
4212// maxnum(X, GreaterC) != C --> true
4213returngetTrue(RetTy);
4214case FCmpInst::FCMP_OGE:
4215case FCmpInst::FCMP_UGE:
4216case FCmpInst::FCMP_OGT:
4217case FCmpInst::FCMP_UGT:
4218// minnum(X, LesserC) >= C --> false
4219// minnum(X, LesserC) > C --> false
4220// maxnum(X, GreaterC) >= C --> true
4221// maxnum(X, GreaterC) > C --> true
4222return ConstantInt::get(RetTy, IsMaxNum);
4223case FCmpInst::FCMP_OLE:
4224case FCmpInst::FCMP_ULE:
4225case FCmpInst::FCMP_OLT:
4226case FCmpInst::FCMP_ULT:
4227// minnum(X, LesserC) <= C --> true
4228// minnum(X, LesserC) < C --> true
4229// maxnum(X, GreaterC) <= C --> false
4230// maxnum(X, GreaterC) < C --> false
4231return ConstantInt::get(RetTy, !IsMaxNum);
4232default:
4233// TRUE/FALSE/ORD/UNO should be handled before this.
4234llvm_unreachable("Unexpected fcmp predicate");
4235 }
4236 }
4237 }
4238
4239// TODO: Could fold this with above if there were a matcher which returned all
4240// classes in a non-splat vector.
4241if (match(RHS,m_AnyZeroFP())) {
4242switch (Pred) {
4243case FCmpInst::FCMP_OGE:
4244case FCmpInst::FCMP_ULT: {
4245FPClassTest Interested =KnownFPClass::OrderedLessThanZeroMask;
4246if (!FMF.noNaNs())
4247 Interested |=fcNan;
4248
4249KnownFPClass Known = computeLHSClass(Interested);
4250
4251// Positive or zero X >= 0.0 --> true
4252// Positive or zero X < 0.0 --> false
4253if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4254 Known.cannotBeOrderedLessThanZero())
4255return Pred == FCmpInst::FCMP_OGE ?getTrue(RetTy) :getFalse(RetTy);
4256break;
4257 }
4258case FCmpInst::FCMP_UGE:
4259case FCmpInst::FCMP_OLT: {
4260FPClassTest Interested =KnownFPClass::OrderedLessThanZeroMask;
4261KnownFPClass Known = computeLHSClass(Interested);
4262
4263// Positive or zero or nan X >= 0.0 --> true
4264// Positive or zero or nan X < 0.0 --> false
4265if (Known.cannotBeOrderedLessThanZero())
4266return Pred == FCmpInst::FCMP_UGE ?getTrue(RetTy) :getFalse(RetTy);
4267break;
4268 }
4269default:
4270break;
4271 }
4272 }
4273
4274// If the comparison is with the result of a select instruction, check whether
4275// comparing with either branch of the select always yields the same value.
4276if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4277if (Value *V =threadCmpOverSelect(Pred,LHS,RHS, Q, MaxRecurse))
4278return V;
4279
4280// If the comparison is with the result of a phi instruction, check whether
4281// doing the compare with each incoming phi value yields a common result.
4282if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4283if (Value *V =threadCmpOverPHI(Pred,LHS,RHS, Q, MaxRecurse))
4284return V;
4285
4286returnnullptr;
4287}
4288
4289Value *llvm::simplifyFCmpInst(CmpPredicate Predicate,Value *LHS,Value *RHS,
4290FastMathFlags FMF,constSimplifyQuery &Q) {
4291 return ::simplifyFCmpInst(Predicate,LHS,RHS, FMF, Q,RecursionLimit);
4292}
4293
4294staticValue *simplifyWithOpsReplaced(Value *V,
4295ArrayRef<std::pair<Value *, Value *>> Ops,
4296constSimplifyQuery &Q,
4297bool AllowRefinement,
4298SmallVectorImpl<Instruction *> *DropFlags,
4299unsigned MaxRecurse) {
4300assert((AllowRefinement || !Q.CanUseUndef) &&
4301"If AllowRefinement=false then CanUseUndef=false");
4302for (constauto &OpAndRepOp : Ops) {
4303// We cannot replace a constant, and shouldn't even try.
4304if (isa<Constant>(OpAndRepOp.first))
4305returnnullptr;
4306
4307// Trivial replacement.
4308if (V == OpAndRepOp.first)
4309return OpAndRepOp.second;
4310 }
4311
4312if (!MaxRecurse--)
4313returnnullptr;
4314
4315auto *I = dyn_cast<Instruction>(V);
4316if (!I)
4317returnnullptr;
4318
4319// The arguments of a phi node might refer to a value from a previous
4320// cycle iteration.
4321if (isa<PHINode>(I))
4322returnnullptr;
4323
4324// Don't fold away llvm.is.constant checks based on assumptions.
4325if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4326returnnullptr;
4327
4328// Don't simplify freeze.
4329if (isa<FreezeInst>(I))
4330returnnullptr;
4331
4332for (constauto &OpAndRepOp : Ops) {
4333// For vector types, the simplification must hold per-lane, so forbid
4334// potentially cross-lane operations like shufflevector.
4335if (OpAndRepOp.first->getType()->isVectorTy() &&
4336 !isNotCrossLaneOperation(I))
4337returnnullptr;
4338 }
4339
4340// Replace Op with RepOp in instruction operands.
4341SmallVector<Value *, 8> NewOps;
4342bool AnyReplaced =false;
4343for (Value *InstOp :I->operands()) {
4344if (Value *NewInstOp =simplifyWithOpsReplaced(
4345 InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4346 NewOps.push_back(NewInstOp);
4347 AnyReplaced = InstOp != NewInstOp;
4348 }else {
4349 NewOps.push_back(InstOp);
4350 }
4351
4352// Bail out if any operand is undef and SimplifyQuery disables undef
4353// simplification. Constant folding currently doesn't respect this option.
4354if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
4355returnnullptr;
4356 }
4357
4358if (!AnyReplaced)
4359returnnullptr;
4360
4361if (!AllowRefinement) {
4362// General InstSimplify functions may refine the result, e.g. by returning
4363// a constant for a potentially poison value. To avoid this, implement only
4364// a few non-refining but profitable transforms here.
4365
4366if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4367unsigned Opcode = BO->getOpcode();
4368// id op x -> x, x op id -> x
4369// Exclude floats, because x op id may produce a different NaN value.
4370if (!BO->getType()->isFPOrFPVectorTy()) {
4371if (NewOps[0] ==ConstantExpr::getBinOpIdentity(Opcode,I->getType()))
4372return NewOps[1];
4373if (NewOps[1] ==ConstantExpr::getBinOpIdentity(Opcode,I->getType(),
4374/* RHS */true))
4375return NewOps[0];
4376 }
4377
4378// x & x -> x, x | x -> x
4379if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4380 NewOps[0] == NewOps[1]) {
4381// or disjoint x, x results in poison.
4382if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4383if (PDI->isDisjoint()) {
4384if (!DropFlags)
4385returnnullptr;
4386 DropFlags->push_back(BO);
4387 }
4388 }
4389return NewOps[0];
4390 }
4391
4392// x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4393// by assumption and this case never wraps, so nowrap flags can be
4394// ignored.
4395if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4396 NewOps[0] == NewOps[1] &&
4397any_of(Ops, [=](constauto &Rep) {return NewOps[0] == Rep.second; }))
4398returnConstant::getNullValue(I->getType());
4399
4400// If we are substituting an absorber constant into a binop and extra
4401// poison can't leak if we remove the select -- because both operands of
4402// the binop are based on the same value -- then it may be safe to replace
4403// the value with the absorber constant. Examples:
4404// (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4405// (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4406// (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op)
4407Constant *Absorber =ConstantExpr::getBinOpAbsorber(Opcode,I->getType());
4408if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4409any_of(Ops,
4410 [=](constauto &Rep) {returnimpliesPoison(BO, Rep.first); }))
4411return Absorber;
4412 }
4413
4414if (isa<GetElementPtrInst>(I)) {
4415// getelementptr x, 0 -> x.
4416// This never returns poison, even if inbounds is set.
4417if (NewOps.size() == 2 &&match(NewOps[1],m_Zero()))
4418return NewOps[0];
4419 }
4420 }else {
4421// The simplification queries below may return the original value. Consider:
4422// %div = udiv i32 %arg, %arg2
4423// %mul = mul nsw i32 %div, %arg2
4424// %cmp = icmp eq i32 %mul, %arg
4425// %sel = select i1 %cmp, i32 %div, i32 undef
4426// Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4427// simplifies back to %arg. This can only happen because %mul does not
4428// dominate %div. To ensure a consistent return value contract, we make sure
4429// that this case returns nullptr as well.
4430auto PreventSelfSimplify = [V](Value *Simplified) {
4431return Simplified != V ? Simplified :nullptr;
4432 };
4433
4434return PreventSelfSimplify(
4435::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4436 }
4437
4438// If all operands are constant after substituting Op for RepOp then we can
4439// constant fold the instruction.
4440SmallVector<Constant *, 8> ConstOps;
4441for (Value *NewOp : NewOps) {
4442if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4443 ConstOps.push_back(ConstOp);
4444else
4445returnnullptr;
4446 }
4447
4448// Consider:
4449// %cmp = icmp eq i32 %x, 2147483647
4450// %add = add nsw i32 %x, 1
4451// %sel = select i1 %cmp, i32 -2147483648, i32 %add
4452//
4453// We can't replace %sel with %add unless we strip away the flags (which
4454// will be done in InstCombine).
4455// TODO: This may be unsound, because it only catches some forms of
4456// refinement.
4457if (!AllowRefinement) {
4458if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4459// abs cannot create poison if the value is known to never be int_min.
4460if (auto *II = dyn_cast<IntrinsicInst>(I);
4461II &&II->getIntrinsicID() == Intrinsic::abs) {
4462if (!ConstOps[0]->isNotMinSignedValue())
4463returnnullptr;
4464 }else
4465returnnullptr;
4466 }
4467Constant *Res =ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4468/*AllowNonDeterministic=*/false);
4469if (DropFlags && Res &&I->hasPoisonGeneratingAnnotations())
4470 DropFlags->push_back(I);
4471return Res;
4472 }
4473
4474returnConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4475/*AllowNonDeterministic=*/false);
4476}
4477
4478staticValue *simplifyWithOpReplaced(Value *V,Value *Op,Value *RepOp,
4479constSimplifyQuery &Q,
4480bool AllowRefinement,
4481SmallVectorImpl<Instruction *> *DropFlags,
4482unsigned MaxRecurse) {
4483returnsimplifyWithOpsReplaced(V, {{Op, RepOp}}, Q, AllowRefinement,
4484 DropFlags, MaxRecurse);
4485}
4486
4487Value *llvm::simplifyWithOpReplaced(Value *V,Value *Op,Value *RepOp,
4488constSimplifyQuery &Q,
4489bool AllowRefinement,
4490SmallVectorImpl<Instruction *> *DropFlags) {
4491// If refinement is disabled, also disable undef simplifications (which are
4492// always refinements) in SimplifyQuery.
4493if (!AllowRefinement)
4494 return ::simplifyWithOpReplaced(V,Op, RepOp, Q.getWithoutUndef(),
4495 AllowRefinement, DropFlags,RecursionLimit);
4496 return ::simplifyWithOpReplaced(V,Op, RepOp, Q, AllowRefinement, DropFlags,
4497RecursionLimit);
4498}
4499
4500/// Try to simplify a select instruction when its condition operand is an
4501/// integer comparison where one operand of the compare is a constant.
4502staticValue *simplifySelectBitTest(Value *TrueVal,Value *FalseVal,Value *X,
4503constAPInt *Y,bool TrueWhenUnset) {
4504constAPInt *C;
4505
4506// (X & Y) == 0 ? X & ~Y : X --> X
4507// (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4508if (FalseVal ==X &&match(TrueVal,m_And(m_Specific(X),m_APInt(C))) &&
4509 *Y == ~*C)
4510return TrueWhenUnset ? FalseVal : TrueVal;
4511
4512// (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4513// (X & Y) != 0 ? X : X & ~Y --> X
4514if (TrueVal ==X &&match(FalseVal,m_And(m_Specific(X),m_APInt(C))) &&
4515 *Y == ~*C)
4516return TrueWhenUnset ? FalseVal : TrueVal;
4517
4518if (Y->isPowerOf2()) {
4519// (X & Y) == 0 ? X | Y : X --> X | Y
4520// (X & Y) != 0 ? X | Y : X --> X
4521if (FalseVal ==X &&match(TrueVal,m_Or(m_Specific(X),m_APInt(C))) &&
4522 *Y == *C) {
4523// We can't return the or if it has the disjoint flag.
4524if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4525returnnullptr;
4526return TrueWhenUnset ? TrueVal : FalseVal;
4527 }
4528
4529// (X & Y) == 0 ? X : X | Y --> X
4530// (X & Y) != 0 ? X : X | Y --> X | Y
4531if (TrueVal ==X &&match(FalseVal,m_Or(m_Specific(X),m_APInt(C))) &&
4532 *Y == *C) {
4533// We can't return the or if it has the disjoint flag.
4534if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4535returnnullptr;
4536return TrueWhenUnset ? TrueVal : FalseVal;
4537 }
4538 }
4539
4540returnnullptr;
4541}
4542
4543staticValue *simplifyCmpSelOfMaxMin(Value *CmpLHS,Value *CmpRHS,
4544CmpPredicate Pred,Value *TVal,
4545Value *FVal) {
4546// Canonicalize common cmp+sel operand as CmpLHS.
4547if (CmpRHS == TVal || CmpRHS == FVal) {
4548std::swap(CmpLHS, CmpRHS);
4549 Pred = ICmpInst::getSwappedPredicate(Pred);
4550 }
4551
4552// Canonicalize common cmp+sel operand as TVal.
4553if (CmpLHS == FVal) {
4554std::swap(TVal, FVal);
4555 Pred = ICmpInst::getInversePredicate(Pred);
4556 }
4557
4558// A vector select may be shuffling together elements that are equivalent
4559// based on the max/min/select relationship.
4560Value *X = CmpLHS, *Y = CmpRHS;
4561bool PeekedThroughSelectShuffle =false;
4562auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4563if (Shuf && Shuf->isSelect()) {
4564if (Shuf->getOperand(0) ==Y)
4565 FVal = Shuf->getOperand(1);
4566elseif (Shuf->getOperand(1) ==Y)
4567 FVal = Shuf->getOperand(0);
4568else
4569returnnullptr;
4570 PeekedThroughSelectShuffle =true;
4571 }
4572
4573// (X pred Y) ? X : max/min(X, Y)
4574auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4575if (!MMI || TVal !=X ||
4576 !match(FVal,m_c_MaxOrMin(m_Specific(X),m_Specific(Y))))
4577returnnullptr;
4578
4579// (X > Y) ? X : max(X, Y) --> max(X, Y)
4580// (X >= Y) ? X : max(X, Y) --> max(X, Y)
4581// (X < Y) ? X : min(X, Y) --> min(X, Y)
4582// (X <= Y) ? X : min(X, Y) --> min(X, Y)
4583//
4584// The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4585// (X > Y) ? X : (Z ? max(X, Y) : Y)
4586// If Z is true, this reduces as above, and if Z is false:
4587// (X > Y) ? X : Y --> max(X, Y)
4588ICmpInst::Predicate MMPred = MMI->getPredicate();
4589if (MMPred ==CmpInst::getStrictPredicate(Pred))
4590return MMI;
4591
4592// Other transforms are not valid with a shuffle.
4593if (PeekedThroughSelectShuffle)
4594returnnullptr;
4595
4596// (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4597if (Pred ==CmpInst::ICMP_EQ)
4598return MMI;
4599
4600// (X != Y) ? X : max/min(X, Y) --> X
4601if (Pred ==CmpInst::ICMP_NE)
4602returnX;
4603
4604// (X < Y) ? X : max(X, Y) --> X
4605// (X <= Y) ? X : max(X, Y) --> X
4606// (X > Y) ? X : min(X, Y) --> X
4607// (X >= Y) ? X : min(X, Y) --> X
4608ICmpInst::Predicate InvPred =CmpInst::getInversePredicate(Pred);
4609if (MMPred ==CmpInst::getStrictPredicate(InvPred))
4610returnX;
4611
4612returnnullptr;
4613}
4614
4615/// An alternative way to test whether a bit is set uses sgt/slt instead of
4616/// eq/ne.
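/// For example, "icmp sgt i8 %x, -1" is the same bit test as
/// "(and i8 %x, 0x80) == 0", and "icmp slt i8 %x, 0" is the same as
/// "(and i8 %x, 0x80) != 0"; decomposeBitTestICmp() recovers the mask form.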
4617staticValue *simplifySelectWithFakeICmpEq(Value *CmpLHS,Value *CmpRHS,
4618CmpPredicate Pred,Value *TrueVal,
4619Value *FalseVal) {
4620if (auto Res =decomposeBitTestICmp(CmpLHS, CmpRHS, Pred))
4621returnsimplifySelectBitTest(TrueVal, FalseVal, Res->X, &Res->Mask,
4622 Res->Pred == ICmpInst::ICMP_EQ);
4623
4624returnnullptr;
4625}
4626
4627/// Try to simplify a select instruction when its condition operand is an
4628/// integer equality or floating-point equivalence comparison.
4629staticValue *simplifySelectWithEquivalence(
4630ArrayRef<std::pair<Value *, Value *>> Replacements,Value *TrueVal,
4631Value *FalseVal,constSimplifyQuery &Q,unsigned MaxRecurse) {
4632Value *SimplifiedFalseVal =
4633simplifyWithOpsReplaced(FalseVal, Replacements, Q.getWithoutUndef(),
4634/* AllowRefinement */false,
4635/* DropFlags */nullptr, MaxRecurse);
4636if (!SimplifiedFalseVal)
4637 SimplifiedFalseVal = FalseVal;
4638
4639Value *SimplifiedTrueVal =
4640simplifyWithOpsReplaced(TrueVal, Replacements, Q,
4641/* AllowRefinement */true,
4642/* DropFlags */nullptr, MaxRecurse);
4643if (!SimplifiedTrueVal)
4644 SimplifiedTrueVal = TrueVal;
4645
4646if (SimplifiedFalseVal == SimplifiedTrueVal)
4647return FalseVal;
4648
4649returnnullptr;
4650}
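// Illustrative IR (exposition only; names are hypothetical). With the
// replacement {%x -> 0} implied by the equality,
//   %c = icmp eq i32 %x, 0
//   %a = add i32 %x, %y
//   %s = select i1 %c, i32 %a, i32 %y
// the true arm simplifies to %y, matching the false arm, so %s folds to %y.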
4651
4652/// Try to simplify a select instruction when its condition operand is an
4653/// integer comparison.
4654staticValue *simplifySelectWithICmpCond(Value *CondVal,Value *TrueVal,
4655Value *FalseVal,
4656constSimplifyQuery &Q,
4657unsigned MaxRecurse) {
4658CmpPredicate Pred;
4659Value *CmpLHS, *CmpRHS;
4660if (!match(CondVal,m_ICmp(Pred,m_Value(CmpLHS),m_Value(CmpRHS))))
4661returnnullptr;
4662
4663if (Value *V =simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4664return V;
4665
4666// Canonicalize ne to eq predicate.
4667if (Pred == ICmpInst::ICMP_NE) {
4668 Pred = ICmpInst::ICMP_EQ;
4669std::swap(TrueVal, FalseVal);
4670 }
4671
4672// Check for integer min/max with a limit constant:
4673// X > MIN_INT ? X : MIN_INT --> X
4674// X < MAX_INT ? X : MAX_INT --> X
4675if (TrueVal->getType()->isIntOrIntVectorTy()) {
4676Value *X, *Y;
4677SelectPatternFlavor SPF =
4678matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4679X,Y)
4680 .Flavor;
4681if (SelectPatternResult::isMinOrMax(SPF) && Pred ==getMinMaxPred(SPF)) {
4682APInt LimitC =getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4683X->getType()->getScalarSizeInBits());
4684if (match(Y,m_SpecificInt(LimitC)))
4685returnX;
4686 }
4687 }
4688
4689if (Pred == ICmpInst::ICMP_EQ &&match(CmpRHS,m_Zero())) {
4690Value *X;
4691constAPInt *Y;
4692if (match(CmpLHS,m_And(m_Value(X),m_APInt(Y))))
4693if (Value *V =simplifySelectBitTest(TrueVal, FalseVal,X,Y,
4694/*TrueWhenUnset=*/true))
4695return V;
4696
4697// Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4698Value *ShAmt;
4699auto isFsh =m_CombineOr(m_FShl(m_Value(X),m_Value(),m_Value(ShAmt)),
4700m_FShr(m_Value(),m_Value(X),m_Value(ShAmt)));
4701// (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4702// (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4703if (match(TrueVal, isFsh) && FalseVal ==X && CmpLHS == ShAmt)
4704returnX;
4705
4706// Test for a zero-shift-guard-op around rotates. These are used to
4707// avoid UB from oversized shifts in raw IR rotate patterns, but the
4708// intrinsics do not have that problem.
4709// We do not allow this transform for the general funnel shift case because
4710// that would not preserve the poison safety of the original code.
4711auto isRotate =
4712m_CombineOr(m_FShl(m_Value(X),m_Deferred(X),m_Value(ShAmt)),
4713m_FShr(m_Value(X),m_Deferred(X),m_Value(ShAmt)));
4714// (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4715// (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4716if (match(FalseVal, isRotate) && TrueVal ==X && CmpLHS == ShAmt &&
4717 Pred == ICmpInst::ICMP_EQ)
4718return FalseVal;
4719
4720// X == 0 ? abs(X) : -abs(X) --> -abs(X)
4721// X == 0 ? -abs(X) : abs(X) --> abs(X)
4722if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4723match(FalseVal,m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4724return FalseVal;
4725if (match(TrueVal,
4726m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4727match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4728return FalseVal;
4729 }
4730
4731// Check for other compares that behave like a bit test.
4732if (Value *V =
4733simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4734return V;
4735
4736// If we have a scalar equality comparison, then we know the value in one of
4737// the arms of the select. See if substituting this value into the arm and
4738// simplifying the result yields the same value as the other arm.
4739if (Pred == ICmpInst::ICMP_EQ) {
4740if (Value *V =simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, TrueVal,
4741 FalseVal, Q, MaxRecurse))
4742return V;
4743if (Value *V =simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, TrueVal,
4744 FalseVal, Q, MaxRecurse))
4745return V;
4746
4747Value *X;
4748Value *Y;
4749// select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4750if (match(CmpLHS,m_Or(m_Value(X),m_Value(Y))) &&
4751match(CmpRHS,m_Zero())) {
4752// (X | Y) == 0 implies X == 0 and Y == 0.
4753if (Value *V =simplifySelectWithEquivalence(
4754 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4755return V;
4756 }
4757
4758// select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4759if (match(CmpLHS,m_And(m_Value(X),m_Value(Y))) &&
4760match(CmpRHS,m_AllOnes())) {
4761// (X & Y) == -1 implies X == -1 and Y == -1.
4762if (Value *V =simplifySelectWithEquivalence(
4763 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4764return V;
4765 }
4766 }
4767
4768returnnullptr;
4769}
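// Illustrative IR for the zero-shift-guard fold above (exposition only):
//   %c = icmp eq i32 %amt, 0
//   %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %amt)
//   %s = select i1 %c, i32 %f, i32 %x
// folds to %x, because fshl(%x, %y, 0) is %x and the select picks %x otherwise.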
4770
4771/// Try to simplify a select instruction when its condition operand is a
4772/// floating-point comparison.
4773staticValue *simplifySelectWithFCmp(Value *Cond,Value *T,Value *F,
4774constSimplifyQuery &Q,
4775unsigned MaxRecurse) {
4776CmpPredicate Pred;
4777Value *CmpLHS, *CmpRHS;
4778if (!match(Cond,m_FCmp(Pred,m_Value(CmpLHS),m_Value(CmpRHS))))
4779returnnullptr;
4780FCmpInst *I = cast<FCmpInst>(Cond);
4781
4782bool IsEquiv =I->isEquivalence();
4783if (I->isEquivalence(/*Invert=*/true)) {
4784std::swap(T,F);
4785 Pred = FCmpInst::getInversePredicate(Pred);
4786 IsEquiv =true;
4787 }
4788
4789// This transform is safe if at least one operand is known to not be zero.
4790// Otherwise, the select can change the sign of a zero operand.
4791if (IsEquiv) {
4792if (Value *V =simplifySelectWithEquivalence({{CmpLHS, CmpRHS}},T,F, Q,
4793 MaxRecurse))
4794return V;
4795if (Value *V =simplifySelectWithEquivalence({{CmpRHS, CmpLHS}},T,F, Q,
4796 MaxRecurse))
4797return V;
4798 }
4799
4800// Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4801if (CmpLHS ==F && CmpRHS ==T)
4802std::swap(CmpLHS, CmpRHS);
4803
4804if (CmpLHS !=T || CmpRHS !=F)
4805returnnullptr;
4806
4807// This transform is also safe if we do not have (do not care about) -0.0.
4808if (Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4809// (T == F) ? T : F --> F
4810if (Pred == FCmpInst::FCMP_OEQ)
4811returnF;
4812
4813// (T != F) ? T : F --> T
4814if (Pred == FCmpInst::FCMP_UNE)
4815returnT;
4816 }
4817
4818returnnullptr;
4819}
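// Illustrative IR (exposition only; requires the nsz fast-math flag on the
// select so the sign of a zero result does not matter):
//   %c = fcmp oeq float %a, %b
//   %s = select nsz i1 %c, float %a, float %b   ; --> %b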
4820
4821/// Given operands for a SelectInst, see if we can fold the result.
4822/// If not, this returns null.
4823staticValue *simplifySelectInst(Value *Cond,Value *TrueVal,Value *FalseVal,
4824constSimplifyQuery &Q,unsigned MaxRecurse) {
4825if (auto *CondC = dyn_cast<Constant>(Cond)) {
4826if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4827if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4828if (Constant *C =ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4829returnC;
4830
4831// select poison, X, Y -> poison
4832if (isa<PoisonValue>(CondC))
4833returnPoisonValue::get(TrueVal->getType());
4834
4835// select undef, X, Y -> X or Y
4836if (Q.isUndefValue(CondC))
4837return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4838
4839// select true, X, Y --> X
4840// select false, X, Y --> Y
4841// For vectors, allow undef/poison elements in the condition to match the
4842// defined elements, so we can eliminate the select.
4843if (match(CondC,m_One()))
4844return TrueVal;
4845if (match(CondC,m_Zero()))
4846return FalseVal;
4847 }
4848
4849assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4850"Select must have bool or bool vector condition");
4851assert(TrueVal->getType() == FalseVal->getType() &&
4852"Select must have same types for true/false ops");
4853
4854if (Cond->getType() == TrueVal->getType()) {
4855// select i1 Cond, i1 true, i1 false --> i1 Cond
4856if (match(TrueVal,m_One()) &&match(FalseVal,m_ZeroInt()))
4857returnCond;
4858
4859// (X && Y) ? X : Y --> Y (commuted 2 ways)
4860if (match(Cond,m_c_LogicalAnd(m_Specific(TrueVal),m_Specific(FalseVal))))
4861return FalseVal;
4862
4863// (X || Y) ? X : Y --> X (commuted 2 ways)
4864if (match(Cond,m_c_LogicalOr(m_Specific(TrueVal),m_Specific(FalseVal))))
4865return TrueVal;
4866
4867// (X || Y) ? false : X --> false (commuted 2 ways)
4868if (match(Cond,m_c_LogicalOr(m_Specific(FalseVal),m_Value())) &&
4869match(TrueVal,m_ZeroInt()))
4870returnConstantInt::getFalse(Cond->getType());
4871
4872// Match patterns that end in logical-and.
4873if (match(FalseVal,m_ZeroInt())) {
4874// !(X || Y) && X --> false (commuted 2 ways)
4875if (match(Cond,m_Not(m_c_LogicalOr(m_Specific(TrueVal),m_Value()))))
4876returnConstantInt::getFalse(Cond->getType());
4877// X && !(X || Y) --> false (commuted 2 ways)
4878if (match(TrueVal,m_Not(m_c_LogicalOr(m_Specific(Cond),m_Value()))))
4879returnConstantInt::getFalse(Cond->getType());
4880
4881// (X || Y) && Y --> Y (commuted 2 ways)
4882if (match(Cond,m_c_LogicalOr(m_Specific(TrueVal),m_Value())))
4883return TrueVal;
4884// Y && (X || Y) --> Y (commuted 2 ways)
4885if (match(TrueVal,m_c_LogicalOr(m_Specific(Cond),m_Value())))
4886returnCond;
4887
4888// (X || Y) && (X || !Y) --> X (commuted 8 ways)
4889Value *X, *Y;
4890if (match(Cond,m_c_LogicalOr(m_Value(X),m_Not(m_Value(Y)))) &&
4891match(TrueVal,m_c_LogicalOr(m_Specific(X),m_Specific(Y))))
4892returnX;
4893if (match(TrueVal,m_c_LogicalOr(m_Value(X),m_Not(m_Value(Y)))) &&
4894match(Cond,m_c_LogicalOr(m_Specific(X),m_Specific(Y))))
4895returnX;
4896 }
4897
4898// Match patterns that end in logical-or.
4899if (match(TrueVal,m_One())) {
4900// !(X && Y) || X --> true (commuted 2 ways)
4901if (match(Cond,m_Not(m_c_LogicalAnd(m_Specific(FalseVal),m_Value()))))
4902returnConstantInt::getTrue(Cond->getType());
4903// X || !(X && Y) --> true (commuted 2 ways)
4904if (match(FalseVal,m_Not(m_c_LogicalAnd(m_Specific(Cond),m_Value()))))
4905returnConstantInt::getTrue(Cond->getType());
4906
4907// (X && Y) || Y --> Y (commuted 2 ways)
4908if (match(Cond,m_c_LogicalAnd(m_Specific(FalseVal),m_Value())))
4909return FalseVal;
4910// Y || (X && Y) --> Y (commuted 2 ways)
4911if (match(FalseVal,m_c_LogicalAnd(m_Specific(Cond),m_Value())))
4912returnCond;
4913 }
4914 }
4915
4916// select ?, X, X -> X
4917if (TrueVal == FalseVal)
4918return TrueVal;
4919
4920if (Cond == TrueVal) {
4921// select i1 X, i1 X, i1 false --> X (logical-and)
4922if (match(FalseVal,m_ZeroInt()))
4923returnCond;
4924// select i1 X, i1 X, i1 true --> true
4925if (match(FalseVal,m_One()))
4926returnConstantInt::getTrue(Cond->getType());
4927 }
4928if (Cond == FalseVal) {
4929// select i1 X, i1 true, i1 X --> X (logical-or)
4930if (match(TrueVal,m_One()))
4931returnCond;
4932// select i1 X, i1 false, i1 X --> false
4933if (match(TrueVal,m_ZeroInt()))
4934returnConstantInt::getFalse(Cond->getType());
4935 }
4936
4937// If the true or false value is poison, we can fold to the other value.
4938// If the true or false value is undef, we can fold to the other value as
4939// long as the other value isn't poison.
4940// select ?, poison, X -> X
4941// select ?, undef, X -> X
4942if (isa<PoisonValue>(TrueVal) ||
4943 (Q.isUndefValue(TrueVal) &&impliesPoison(FalseVal,Cond)))
4944return FalseVal;
4945// select ?, X, poison -> X
4946// select ?, X, undef -> X
4947if (isa<PoisonValue>(FalseVal) ||
4948 (Q.isUndefValue(FalseVal) &&impliesPoison(TrueVal,Cond)))
4949return TrueVal;
4950
4951// Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4952Constant *TrueC, *FalseC;
4953if (isa<FixedVectorType>(TrueVal->getType()) &&
4954match(TrueVal,m_Constant(TrueC)) &&
4955match(FalseVal,m_Constant(FalseC))) {
4956unsigned NumElts =
4957 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4958SmallVector<Constant *, 16> NewC;
4959for (unsigned i = 0; i != NumElts; ++i) {
4960// Bail out on incomplete vector constants.
4961Constant *TEltC = TrueC->getAggregateElement(i);
4962Constant *FEltC = FalseC->getAggregateElement(i);
4963if (!TEltC || !FEltC)
4964break;
4965
4966// If the elements match (undef or not), that value is the result. If only
4967// one element is undef, choose the defined element as the safe result.
4968if (TEltC == FEltC)
4969 NewC.push_back(TEltC);
4970elseif (isa<PoisonValue>(TEltC) ||
4971 (Q.isUndefValue(TEltC) &&isGuaranteedNotToBePoison(FEltC)))
4972 NewC.push_back(FEltC);
4973elseif (isa<PoisonValue>(FEltC) ||
4974 (Q.isUndefValue(FEltC) &&isGuaranteedNotToBePoison(TEltC)))
4975 NewC.push_back(TEltC);
4976else
4977break;
4978 }
4979if (NewC.size() == NumElts)
4980returnConstantVector::get(NewC);
4981 }
4982
4983if (Value *V =
4984simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4985return V;
4986
4987if (Value *V =simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4988return V;
4989
4990 std::optional<bool> Imp =isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4991if (Imp)
4992return *Imp ? TrueVal : FalseVal;
4993
4994returnnullptr;
4995}
4996
4997Value *llvm::simplifySelectInst(Value *Cond,Value *TrueVal,Value *FalseVal,
4998constSimplifyQuery &Q) {
4999 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q,RecursionLimit);
5000}
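// Usage sketch (illustrative only; `Sel` is an existing SelectInst and `Q` a
// SimplifyQuery supplied by the caller, neither defined in this file):
//   if (Value *V = simplifySelectInst(Sel->getCondition(), Sel->getTrueValue(),
//                                     Sel->getFalseValue(), Q))
//     Sel->replaceAllUsesWith(V);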
5001
5002/// Given operands for an GetElementPtrInst, see if we can fold the result.
5003/// If not, this returns null.
5004staticValue *simplifyGEPInst(Type *SrcTy,Value *Ptr,
5005ArrayRef<Value *> Indices,GEPNoWrapFlags NW,
5006constSimplifyQuery &Q,unsigned) {
5007// The address space of the GEP pointer operand.
5008unsigned AS =
5009 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
5010
5011// getelementptr P -> P.
5012if (Indices.empty())
5013returnPtr;
5014
5015// Compute the (pointer) type returned by the GEP instruction.
5016Type *LastType =GetElementPtrInst::getIndexedType(SrcTy, Indices);
5017Type *GEPTy =Ptr->getType();
5018if (!GEPTy->isVectorTy()) {
5019for (Value *Op : Indices) {
5020// If one of the operands is a vector, the result type is a vector of
5021// pointers. All vector operands must have the same number of elements.
5022if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
5023 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5024break;
5025 }
5026 }
5027 }
5028
5029// All-zero GEP is a no-op, unless it performs a vector splat.
5030if (Ptr->getType() == GEPTy &&
5031all_of(Indices, [](constauto *V) {returnmatch(V,m_Zero()); }))
5032returnPtr;
5033
5034// getelementptr poison, idx -> poison
5035// getelementptr baseptr, poison -> poison
5036if (isa<PoisonValue>(Ptr) ||
5037any_of(Indices, [](constauto *V) {return isa<PoisonValue>(V); }))
5038returnPoisonValue::get(GEPTy);
5039
5040// getelementptr undef, idx -> undef
5041if (Q.isUndefValue(Ptr))
5042returnUndefValue::get(GEPTy);
5043
5044bool IsScalableVec =
5045 SrcTy->isScalableTy() ||any_of(Indices, [](constValue *V) {
5046return isa<ScalableVectorType>(V->getType());
5047 });
5048
5049if (Indices.size() == 1) {
5050Type *Ty = SrcTy;
5051if (!IsScalableVec && Ty->isSized()) {
5052Value *P;
5053uint64_tC;
5054uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5055// getelementptr P, N -> P if P points to a type of zero size.
5056if (TyAllocSize == 0 &&Ptr->getType() == GEPTy)
5057returnPtr;
5058
5059// The following transforms are only safe if the ptrtoint cast
5060// doesn't truncate the pointers.
5061if (Indices[0]->getType()->getScalarSizeInBits() ==
5062 Q.DL.getPointerSizeInBits(AS)) {
5063auto CanSimplify = [GEPTy, &P,Ptr]() ->bool {
5064returnP->getType() == GEPTy &&
5065getUnderlyingObject(P) ==getUnderlyingObject(Ptr);
5066 };
5067// getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5068if (TyAllocSize == 1 &&
5069match(Indices[0],
5070m_Sub(m_PtrToInt(m_Value(P)),m_PtrToInt(m_Specific(Ptr)))) &&
5071 CanSimplify())
5072returnP;
5073
5074// getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5075// size 1 << C.
5076if (match(Indices[0],m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
5077m_PtrToInt(m_Specific(Ptr))),
5078m_ConstantInt(C))) &&
5079 TyAllocSize == 1ULL <<C && CanSimplify())
5080returnP;
5081
5082// getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5083// size C.
5084if (match(Indices[0],m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
5085m_PtrToInt(m_Specific(Ptr))),
5086m_SpecificInt(TyAllocSize))) &&
5087 CanSimplify())
5088returnP;
5089 }
5090 }
5091 }
5092
5093if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5094all_of(Indices.drop_back(1),
5095 [](Value *Idx) { return match(Idx, m_Zero()); })) {
5096unsigned IdxWidth =
5097 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5098if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5099APInt BasePtrOffset(IdxWidth, 0);
5100Value *StrippedBasePtr =
5101Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5102
5103// Avoid creating inttoptr of zero here: while LLVM's treatment of
5104// inttoptr is generally conservative, this particular case is folded to
5105// a null pointer, which will have incorrect provenance.
5106
5107// gep (gep V, C), (sub 0, V) -> C
5108if (match(Indices.back(),
5109m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5110 !BasePtrOffset.isZero()) {
5111auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5112returnConstantExpr::getIntToPtr(CI, GEPTy);
5113 }
5114// gep (gep V, C), (xor V, -1) -> C-1
5115if (match(Indices.back(),
5116m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)),m_AllOnes())) &&
5117 !BasePtrOffset.isOne()) {
5118auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5119returnConstantExpr::getIntToPtr(CI, GEPTy);
5120 }
5121 }
5122 }
5123
5124// Check to see if this is constant foldable.
5125if (!isa<Constant>(Ptr) ||
5126 !all_of(Indices, [](Value *V) {return isa<Constant>(V); }))
5127returnnullptr;
5128
5129if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5130returnConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
5131 Indices);
5132
5133auto *CE =
5134ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
5135returnConstantFoldConstant(CE, Q.DL);
5136}
5137
5138Value *llvm::simplifyGEPInst(Type *SrcTy,Value *Ptr,ArrayRef<Value *> Indices,
5139GEPNoWrapFlags NW,constSimplifyQuery &Q) {
5140 return ::simplifyGEPInst(SrcTy,Ptr, Indices, NW, Q,RecursionLimit);
5141}
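// Illustrative IR for the all-zero GEP fold above (exposition only):
//   %g = getelementptr [4 x i32], ptr %p, i64 0, i64 0   ; --> %p
// No offset is added and no vector splat is performed, so %g is just %p.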
5142
5143/// Given operands for an InsertValueInst, see if we can fold the result.
5144/// If not, this returns null.
5145staticValue *simplifyInsertValueInst(Value *Agg,Value *Val,
5146ArrayRef<unsigned> Idxs,
5147constSimplifyQuery &Q,unsigned) {
5148if (Constant *CAgg = dyn_cast<Constant>(Agg))
5149if (Constant *CVal = dyn_cast<Constant>(Val))
5150returnConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5151
5152// insertvalue x, poison, n -> x
5153// insertvalue x, undef, n -> x if x cannot be poison
5154if (isa<PoisonValue>(Val) ||
5155 (Q.isUndefValue(Val) &&isGuaranteedNotToBePoison(Agg)))
5156return Agg;
5157
5158// insertvalue x, (extractvalue y, n), n
5159if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5160if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5161 EV->getIndices() == Idxs) {
5162// insertvalue poison, (extractvalue y, n), n -> y
5163// insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5164if (isa<PoisonValue>(Agg) ||
5165 (Q.isUndefValue(Agg) &&
5166isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5167return EV->getAggregateOperand();
5168
5169// insertvalue y, (extractvalue y, n), n -> y
5170if (Agg == EV->getAggregateOperand())
5171return Agg;
5172 }
5173
5174returnnullptr;
5175}
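// Illustrative IR (exposition only):
//   %e = extractvalue { i32, i64 } %agg, 1
//   %r = insertvalue { i32, i64 } %agg, i64 %e, 1   ; --> %agg
// Re-inserting the value that was just extracted leaves the aggregate
// unchanged.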
5176
5177Value *llvm::simplifyInsertValueInst(Value *Agg,Value *Val,
5178ArrayRef<unsigned> Idxs,
5179constSimplifyQuery &Q) {
5180 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q,RecursionLimit);
5181}
5182
5183Value *llvm::simplifyInsertElementInst(Value *Vec,Value *Val,Value *Idx,
5184constSimplifyQuery &Q) {
5185// Try to constant fold.
5186auto *VecC = dyn_cast<Constant>(Vec);
5187auto *ValC = dyn_cast<Constant>(Val);
5188auto *IdxC = dyn_cast<Constant>(Idx);
5189if (VecC && ValC && IdxC)
5190returnConstantExpr::getInsertElement(VecC, ValC, IdxC);
5191
5192// For fixed-length vector, fold into poison if index is out of bounds.
5193if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5194if (isa<FixedVectorType>(Vec->getType()) &&
5195 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5196returnPoisonValue::get(Vec->getType());
5197 }
5198
5199// If the index is undef, it might be out of bounds (see the case above).
5200if (Q.isUndefValue(Idx))
5201returnPoisonValue::get(Vec->getType());
5202
5203// If the scalar is poison, or it is undef and there is no risk of
5204// propagating poison from the vector value, simplify to the vector value.
5205if (isa<PoisonValue>(Val) ||
5206 (Q.isUndefValue(Val) &&isGuaranteedNotToBePoison(Vec)))
5207return Vec;
5208
5209// Inserting the splatted value into a constant splat does nothing.
5210if (VecC && ValC && VecC->getSplatValue() == ValC)
5211return Vec;
5212
5213// If we extract a value from a vector and then insert it back into the same
5214// place, the result is the input vector:
5215// insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5216if (match(Val,m_ExtractElt(m_Specific(Vec),m_Specific(Idx))))
5217return Vec;
5218
5219returnnullptr;
5220}
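// Illustrative IR (exposition only):
//   %e = extractelement <4 x i32> %v, i64 2
//   %r = insertelement <4 x i32> %v, i32 %e, i64 2   ; --> %v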
5221
5222/// Given operands for an ExtractValueInst, see if we can fold the result.
5223/// If not, this returns null.
5224staticValue *simplifyExtractValueInst(Value *Agg,ArrayRef<unsigned> Idxs,
5225constSimplifyQuery &,unsigned) {
5226if (auto *CAgg = dyn_cast<Constant>(Agg))
5227returnConstantFoldExtractValueInstruction(CAgg, Idxs);
5228
5229// extractvalue x, (insertvalue y, elt, n), n -> elt
5230unsigned NumIdxs = Idxs.size();
5231for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI !=nullptr;
5232 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5233ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5234unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5235unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5236if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5237 Idxs.slice(0, NumCommonIdxs)) {
5238if (NumIdxs == NumInsertValueIdxs)
5239return IVI->getInsertedValueOperand();
5240break;
5241 }
5242 }
5243
5244returnnullptr;
5245}
5246
5247Value *llvm::simplifyExtractValueInst(Value *Agg,ArrayRef<unsigned> Idxs,
5248constSimplifyQuery &Q) {
5249 return ::simplifyExtractValueInst(Agg, Idxs, Q,RecursionLimit);
5250}
5251
5252/// Given operands for an ExtractElementInst, see if we can fold the result.
5253/// If not, this returns null.
5254staticValue *simplifyExtractElementInst(Value *Vec,Value *Idx,
5255constSimplifyQuery &Q,unsigned) {
5256auto *VecVTy = cast<VectorType>(Vec->getType());
5257if (auto *CVec = dyn_cast<Constant>(Vec)) {
5258if (auto *CIdx = dyn_cast<Constant>(Idx))
5259returnConstantExpr::getExtractElement(CVec, CIdx);
5260
5261if (Q.isUndefValue(Vec))
5262returnUndefValue::get(VecVTy->getElementType());
5263 }
5264
5265// An undef extract index can be arbitrarily chosen to be an out-of-range
5266// index value, which would result in the instruction being poison.
5267if (Q.isUndefValue(Idx))
5268returnPoisonValue::get(VecVTy->getElementType());
5269
5270// If extracting a specified index from the vector, see if we can recursively
5271// find a previously computed scalar that was inserted into the vector.
5272if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5273// For a fixed-length vector, fold into poison if the index is out of bounds.
5274unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5275if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5276returnPoisonValue::get(VecVTy->getElementType());
5277// Handle case where an element is extracted from a splat.
5278if (IdxC->getValue().ult(MinNumElts))
5279if (auto *Splat =getSplatValue(Vec))
5280returnSplat;
5281if (Value *Elt =findScalarElement(Vec, IdxC->getZExtValue()))
5282return Elt;
5283 }else {
5284// extractelt x, (insertelt y, elt, n), n -> elt
5285// If the possibly-variable indices are trivially known to be equal
5286// (because they are the same operand) then use the value that was
5287// inserted directly.
5288auto *IE = dyn_cast<InsertElementInst>(Vec);
5289if (IE && IE->getOperand(2) ==Idx)
5290return IE->getOperand(1);
5291
5292// The index is not relevant if our vector is a splat.
5293if (Value *Splat =getSplatValue(Vec))
5294returnSplat;
5295 }
5296returnnullptr;
5297}
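// Illustrative IR (exposition only): with a fixed-length vector, a constant
// out-of-range index folds to poison:
//   %e = extractelement <4 x i32> %v, i64 7   ; --> poison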
5298
5299Value *llvm::simplifyExtractElementInst(Value *Vec,Value *Idx,
5300constSimplifyQuery &Q) {
5301 return ::simplifyExtractElementInst(Vec,Idx, Q,RecursionLimit);
5302}
5303
5304/// See if we can fold the given phi. If not, returns null.
5305staticValue *simplifyPHINode(PHINode *PN,ArrayRef<Value *> IncomingValues,
5306constSimplifyQuery &Q) {
5307// WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5308// here, because the PHI we might succeed in simplifying to was not
5309// def-reachable from the original PHI!
5310
5311// If all of the PHI's incoming values are the same then replace the PHI node
5312// with the common value.
5313Value *CommonValue =nullptr;
5314bool HasPoisonInput =false;
5315bool HasUndefInput =false;
5316for (Value *Incoming : IncomingValues) {
5317// If the incoming value is the phi node itself, it can safely be skipped.
5318if (Incoming == PN)
5319continue;
5320if (isa<PoisonValue>(Incoming)) {
5321 HasPoisonInput =true;
5322continue;
5323 }
5324if (Q.isUndefValue(Incoming)) {
5325// Remember that we saw an undef value, but otherwise ignore them.
5326 HasUndefInput =true;
5327continue;
5328 }
5329if (CommonValue &&Incoming != CommonValue)
5330returnnullptr;// Not the same, bail out.
5331 CommonValue =Incoming;
5332 }
5333
5334// If CommonValue is null then all of the incoming values were either undef,
5335// poison or equal to the phi node itself.
5336if (!CommonValue)
5337return HasUndefInput ?UndefValue::get(PN->getType())
5338 :PoisonValue::get(PN->getType());
5339
5340if (HasPoisonInput || HasUndefInput) {
5341// If we have a PHI node like phi(X, undef, X), where X is defined by some
5342// instruction, we cannot return X as the result of the PHI node unless it
5343// dominates the PHI block.
5344if (!valueDominatesPHI(CommonValue, PN, Q.DT))
5345returnnullptr;
5346
5347// Make sure we do not replace an undef value with poison.
5348if (HasUndefInput &&
5349 !isGuaranteedNotToBePoison(CommonValue, Q.AC, Q.CxtI, Q.DT))
5350returnnullptr;
5351return CommonValue;
5352 }
5353
5354return CommonValue;
5355}
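// Illustrative IR (exposition only):
//   %p = phi i32 [ %x, %bb1 ], [ undef, %bb2 ], [ %x, %bb3 ]
// folds to %x, provided %x dominates the phi's block and is known not to be
// poison.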
5356
5357staticValue *simplifyCastInst(unsigned CastOpc,Value *Op,Type *Ty,
5358constSimplifyQuery &Q,unsigned MaxRecurse) {
5359if (auto *C = dyn_cast<Constant>(Op))
5360returnConstantFoldCastOperand(CastOpc,C, Ty, Q.DL);
5361
5362if (auto *CI = dyn_cast<CastInst>(Op)) {
5363auto *Src = CI->getOperand(0);
5364Type *SrcTy = Src->getType();
5365Type *MidTy = CI->getType();
5366Type *DstTy = Ty;
5367if (Src->getType() == Ty) {
5368auto FirstOp =static_cast<Instruction::CastOps>(CI->getOpcode());
5369auto SecondOp =static_cast<Instruction::CastOps>(CastOpc);
5370Type *SrcIntPtrTy =
5371 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) :nullptr;
5372Type *MidIntPtrTy =
5373 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) :nullptr;
5374Type *DstIntPtrTy =
5375 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) :nullptr;
5376if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5377 SrcIntPtrTy, MidIntPtrTy,
5378 DstIntPtrTy) == Instruction::BitCast)
5379return Src;
5380 }
5381 }
5382
5383// bitcast x -> x
5384if (CastOpc == Instruction::BitCast)
5385if (Op->getType() == Ty)
5386returnOp;
5387
5388// ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
5389Value *Ptr, *X;
5390if (CastOpc == Instruction::PtrToInt &&
5391match(Op,m_PtrAdd(m_Value(Ptr),
5392m_Sub(m_Value(X),m_PtrToInt(m_Deferred(Ptr))))) &&
5393X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
5394returnX;
5395
5396returnnullptr;
5397}
5398
5399Value *llvm::simplifyCastInst(unsigned CastOpc,Value *Op,Type *Ty,
5400constSimplifyQuery &Q) {
5401 return ::simplifyCastInst(CastOpc,Op, Ty, Q,RecursionLimit);
5402}
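// Illustrative IR for the ptrtoint(ptradd) fold above (exposition only;
// assumes a 64-bit index type):
//   %pi = ptrtoint ptr %p to i64
//   %d  = sub i64 %x, %pi
//   %q  = getelementptr i8, ptr %p, i64 %d
//   %r  = ptrtoint ptr %q to i64   ; --> %x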
5403
5404/// For the given destination element of a shuffle, peek through shuffles to
5405/// match a root vector source operand that contains that element in the same
5406/// vector lane (i.e., the same mask index), so we can eliminate the shuffle(s).
5407staticValue *foldIdentityShuffles(int DestElt,Value *Op0,Value *Op1,
5408int MaskVal,Value *RootVec,
5409unsigned MaxRecurse) {
5410if (!MaxRecurse--)
5411returnnullptr;
5412
5413// Bail out if any mask value is undefined. That kind of shuffle may be
5414// simplified further based on demanded bits or other folds.
5415if (MaskVal == -1)
5416returnnullptr;
5417
5418// The mask value chooses which source operand we need to look at next.
5419int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5420int RootElt = MaskVal;
5421Value *SourceOp = Op0;
5422if (MaskVal >= InVecNumElts) {
5423 RootElt = MaskVal - InVecNumElts;
5424 SourceOp = Op1;
5425 }
5426
5427// If the source operand is a shuffle itself, look through it to find the
5428// matching root vector.
5429if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5430returnfoldIdentityShuffles(
5431 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5432 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5433 }
5434
5435// The source operand is not a shuffle. Initialize the root vector value for
5436// this shuffle if that has not been done yet.
5437if (!RootVec)
5438 RootVec = SourceOp;
5439
5440// Give up as soon as a source operand does not match the existing root value.
5441if (RootVec != SourceOp)
5442returnnullptr;
5443
5444// The element must be coming from the same lane in the source vector
5445// (although it may have crossed lanes in intermediate shuffles).
5446if (RootElt != DestElt)
5447returnnullptr;
5448
5449return RootVec;
5450}
5451
5452staticValue *simplifyShuffleVectorInst(Value *Op0,Value *Op1,
5453ArrayRef<int> Mask,Type *RetTy,
5454constSimplifyQuery &Q,
5455unsigned MaxRecurse) {
5456if (all_of(Mask, [](int Elem) {return Elem ==PoisonMaskElem; }))
5457returnPoisonValue::get(RetTy);
5458
5459auto *InVecTy = cast<VectorType>(Op0->getType());
5460unsigned MaskNumElts = Mask.size();
5461ElementCount InVecEltCount = InVecTy->getElementCount();
5462
5463bool Scalable = InVecEltCount.isScalable();
5464
5465SmallVector<int, 32> Indices;
5466 Indices.assign(Mask.begin(), Mask.end());
5467
5468// Canonicalization: If mask does not select elements from an input vector,
5469// replace that input vector with poison.
5470if (!Scalable) {
5471bool MaskSelects0 =false, MaskSelects1 =false;
5472unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5473for (unsigned i = 0; i != MaskNumElts; ++i) {
5474if (Indices[i] == -1)
5475continue;
5476if ((unsigned)Indices[i] < InVecNumElts)
5477 MaskSelects0 =true;
5478else
5479 MaskSelects1 =true;
5480 }
5481if (!MaskSelects0)
5482 Op0 =PoisonValue::get(InVecTy);
5483if (!MaskSelects1)
5484 Op1 =PoisonValue::get(InVecTy);
5485 }
5486
5487auto *Op0Const = dyn_cast<Constant>(Op0);
5488auto *Op1Const = dyn_cast<Constant>(Op1);
5489
5490// If all operands are constant, constant fold the shuffle. This
5491// transformation depends on the value of the mask, which is not known at
5492// compile time for scalable vectors.
5493if (Op0Const && Op1Const)
5494returnConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5495
5496// Canonicalization: if only one input vector is constant, it shall be the
5497// second one. This transformation depends on the value of the mask, which
5498// is not known at compile time for scalable vectors.
5499if (!Scalable && Op0Const && !Op1Const) {
5500std::swap(Op0, Op1);
5501ShuffleVectorInst::commuteShuffleMask(Indices,
5502 InVecEltCount.getKnownMinValue());
5503 }
5504
5505// A splat of an inserted scalar constant becomes a vector constant:
5506// shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5507// NOTE: We may have commuted above, so analyze the updated Indices, not the
5508// original mask constant.
5509// NOTE: This transformation depends on the value of the mask which is not
5510// known at compile time for scalable vectors
5511Constant *C;
5512ConstantInt *IndexC;
5513if (!Scalable &&match(Op0,m_InsertElt(m_Value(),m_Constant(C),
5514m_ConstantInt(IndexC)))) {
5515// Match a splat shuffle mask of the insert index allowing undef elements.
5516int InsertIndex = IndexC->getZExtValue();
5517if (all_of(Indices, [InsertIndex](int MaskElt) {
5518return MaskElt == InsertIndex || MaskElt == -1;
5519 })) {
5520assert(isa<UndefValue>(Op1) &&"Expected undef operand 1 for splat");
5521
5522// Shuffle mask poisons become poison constant result elements.
5523SmallVector<Constant *, 16> VecC(MaskNumElts,C);
5524for (unsigned i = 0; i != MaskNumElts; ++i)
5525if (Indices[i] == -1)
5526 VecC[i] =PoisonValue::get(C->getType());
5527returnConstantVector::get(VecC);
5528 }
5529 }
5530
5531// A shuffle of a splat is always the splat itself. Legal if the shuffle's
5532// value type is the same as the input vectors' type.
5533if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5534if (Q.isUndefValue(Op1) &&RetTy == InVecTy &&
5535all_equal(OpShuf->getShuffleMask()))
5536return Op0;
5537
5538// All remaining transformations depend on the value of the mask, which is
5539// not known at compile time for scalable vectors.
5540if (Scalable)
5541returnnullptr;
5542
5543// Don't fold a shuffle with undef mask elements. This may get folded in a
5544// better way using demanded bits or other analysis.
5545// TODO: Should we allow this?
5546if (is_contained(Indices, -1))
5547returnnullptr;
5548
5549// Check if every element of this shuffle can be mapped back to the
5550// corresponding element of a single root vector. If so, we don't need this
5551// shuffle. This handles simple identity shuffles as well as chains of
5552// shuffles that may widen/narrow and/or move elements across lanes and back.
5553Value *RootVec =nullptr;
5554for (unsigned i = 0; i != MaskNumElts; ++i) {
5555// Note that recursion is limited for each vector element, so if any element
5556// exceeds the limit, this will fail to simplify.
5557 RootVec =
5558foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5559
5560// We can't replace a widening/narrowing shuffle with one of its operands.
5561if (!RootVec || RootVec->getType() !=RetTy)
5562returnnullptr;
5563 }
5564return RootVec;
5565}
5566
5567/// Given operands for a ShuffleVectorInst, fold the result or return null.
5568Value *llvm::simplifyShuffleVectorInst(Value *Op0,Value *Op1,
5569ArrayRef<int> Mask,Type *RetTy,
5570constSimplifyQuery &Q) {
5571 return ::simplifyShuffleVectorInst(Op0, Op1, Mask,RetTy, Q,RecursionLimit);
5572}
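// Illustrative IR (exposition only): an identity shuffle maps every result
// element back to the same lane of a single source vector:
//   %s = shufflevector <4 x i32> %v, <4 x i32> poison,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>   ; --> %v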
5573
5574staticConstant *foldConstant(Instruction::UnaryOps Opcode,Value *&Op,
5575constSimplifyQuery &Q) {
5576if (auto *C = dyn_cast<Constant>(Op))
5577returnConstantFoldUnaryOpOperand(Opcode,C, Q.DL);
5578returnnullptr;
5579}
5580
5581/// Given the operand for an FNeg, see if we can fold the result. If not, this
5582/// returns null.
5583staticValue *simplifyFNegInst(Value *Op,FastMathFlags FMF,
5584constSimplifyQuery &Q,unsigned MaxRecurse) {
5585if (Constant *C =foldConstant(Instruction::FNeg,Op, Q))
5586returnC;
5587
5588Value *X;
5589// fneg (fneg X) ==> X
5590if (match(Op,m_FNeg(m_Value(X))))
5591returnX;
5592
5593returnnullptr;
5594}
5595
5596Value *llvm::simplifyFNegInst(Value *Op,FastMathFlags FMF,
5597constSimplifyQuery &Q) {
5598 return ::simplifyFNegInst(Op, FMF, Q,RecursionLimit);
5599}
5600
5601/// Try to propagate existing NaN values when possible. If not, replace the
5602/// constant or elements in the constant with a canonical NaN.
5603staticConstant *propagateNaN(Constant *In) {
5604Type *Ty = In->getType();
5605if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5606unsigned NumElts = VecTy->getNumElements();
5607SmallVector<Constant *, 32> NewC(NumElts);
5608for (unsigned i = 0; i != NumElts; ++i) {
5609Constant *EltC = In->getAggregateElement(i);
5610// Poison elements propagate. NaN propagates, except that signaling NaNs
5611// are quieted. Replace unknown or undef elements with a canonical NaN.
5612if (EltC && isa<PoisonValue>(EltC))
5613 NewC[i] = EltC;
5614elseif (EltC && EltC->isNaN())
5615 NewC[i] = ConstantFP::get(
5616 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5617else
5618 NewC[i] =ConstantFP::getNaN(VecTy->getElementType());
5619 }
5620returnConstantVector::get(NewC);
5621 }
5622
5623// If it is not a fixed vector, but not a simple NaN either, return a
5624// canonical NaN.
5625if (!In->isNaN())
5626returnConstantFP::getNaN(Ty);
5627
5628// If we know this is a NaN and it's a scalable vector, we must have a splat
5629// on our hands. Grab that before splatting a QNaN constant.
5630if (isa<ScalableVectorType>(Ty)) {
5631auto *Splat = In->getSplatValue();
5632assert(Splat &&Splat->isNaN() &&
5633"Found a scalable-vector NaN but not a splat");
5634 In =Splat;
5635 }
5636
5637// Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5638// preserve the sign/payload.
5639return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5640}
5641
5642/// Perform folds that are common to any floating-point operation. This implies
5643/// transforms based on poison/undef/NaN because the operation itself makes no
5644/// difference to the result.
5645staticConstant *simplifyFPOp(ArrayRef<Value *> Ops,FastMathFlags FMF,
5646constSimplifyQuery &Q,
5647fp::ExceptionBehavior ExBehavior,
5648RoundingMode Rounding) {
5649// Poison is independent of anything else. It always propagates from an
5650// operand to a math result.
5651if (any_of(Ops, [](Value *V) {returnmatch(V,m_Poison()); }))
5652returnPoisonValue::get(Ops[0]->getType());
5653
5654for (Value *V : Ops) {
5655bool IsNan =match(V,m_NaN());
5656bool IsInf =match(V,m_Inf());
5657bool IsUndef = Q.isUndefValue(V);
5658
5659// If this operation has 'nnan' or 'ninf' and at least one disallowed operand
5660// (an undef operand can be chosen to be NaN/Inf), then the result of
5661// this operation is poison.
5662if (FMF.noNaNs() && (IsNan || IsUndef))
5663returnPoisonValue::get(V->getType());
5664if (FMF.noInfs() && (IsInf || IsUndef))
5665returnPoisonValue::get(V->getType());
5666
5667if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5668// Undef does not propagate because undef means that all bits can take on
5669// any value. If this is undef * NaN for example, then the result values
5670// (at least the exponent bits) are limited. Assume the undef is a
5671// canonical NaN and propagate that.
5672if (IsUndef)
5673returnConstantFP::getNaN(V->getType());
5674if (IsNan)
5675returnpropagateNaN(cast<Constant>(V));
5676 }elseif (ExBehavior !=fp::ebStrict) {
5677if (IsNan)
5678returnpropagateNaN(cast<Constant>(V));
5679 }
5680 }
5681returnnullptr;
5682}
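// Illustrative IR (exposition only): with 'nnan', a NaN operand makes the
// result poison:
//   %r = fadd nnan double %x, 0x7FF8000000000000   ; --> poison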
5683
5684/// Given operands for an FAdd, see if we can fold the result. If not, this
5685/// returns null.
5686staticValue *
5687simplifyFAddInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5688constSimplifyQuery &Q,unsigned MaxRecurse,
5689fp::ExceptionBehavior ExBehavior =fp::ebIgnore,
5690RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5691if (isDefaultFPEnvironment(ExBehavior, Rounding))
5692if (Constant *C =foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5693returnC;
5694
5695if (Constant *C =simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5696returnC;
5697
5698// fadd X, -0 ==> X
5699// With strict/constrained FP, we have these possible edge cases that do
5700// not simplify to Op0:
5701// fadd SNaN, -0.0 --> QNaN
5702// fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5703if (canIgnoreSNaN(ExBehavior, FMF) &&
5704 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5705 FMF.noSignedZeros()))
5706if (match(Op1,m_NegZeroFP()))
5707return Op0;
5708
5709// fadd X, 0 ==> X, when we know X is not -0
5710if (canIgnoreSNaN(ExBehavior, FMF))
5711if (match(Op1,m_PosZeroFP()) &&
5712 (FMF.noSignedZeros() ||cannotBeNegativeZero(Op0,/*Depth=*/0, Q)))
5713return Op0;
5714
5715if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5716returnnullptr;
5717
5718if (FMF.noNaNs()) {
5719// With nnan: X + {+/-}Inf --> {+/-}Inf
5720if (match(Op1,m_Inf()))
5721return Op1;
5722
5723// With nnan: -X + X --> 0.0 (and commuted variant)
5724// We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5725// Negative zeros are allowed because we always end up with positive zero:
5726// X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5727// X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5728// X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5729// X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5730if (match(Op0,m_FSub(m_AnyZeroFP(),m_Specific(Op1))) ||
5731match(Op1,m_FSub(m_AnyZeroFP(),m_Specific(Op0))))
5732returnConstantFP::getZero(Op0->getType());
5733
5734if (match(Op0,m_FNeg(m_Specific(Op1))) ||
5735match(Op1,m_FNeg(m_Specific(Op0))))
5736returnConstantFP::getZero(Op0->getType());
5737 }
5738
5739// (X - Y) + Y --> X
5740// Y + (X - Y) --> X
5741Value *X;
5742if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5743 (match(Op0,m_FSub(m_Value(X),m_Specific(Op1))) ||
5744match(Op1,m_FSub(m_Value(X),m_Specific(Op0)))))
5745returnX;
5746
5747returnnullptr;
5748}
5749
5750/// Given operands for an FSub, see if we can fold the result. If not, this
5751/// returns null.
5752staticValue *
5753simplifyFSubInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5754constSimplifyQuery &Q,unsigned MaxRecurse,
5755fp::ExceptionBehavior ExBehavior =fp::ebIgnore,
5756RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5757if (isDefaultFPEnvironment(ExBehavior, Rounding))
5758if (Constant *C =foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5759returnC;
5760
5761if (Constant *C =simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5762returnC;
5763
5764// fsub X, +0 ==> X
5765if (canIgnoreSNaN(ExBehavior, FMF) &&
5766 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5767 FMF.noSignedZeros()))
5768if (match(Op1,m_PosZeroFP()))
5769return Op0;
5770
5771// fsub X, -0 ==> X, when we know X is not -0
5772if (canIgnoreSNaN(ExBehavior, FMF))
5773if (match(Op1,m_NegZeroFP()) &&
5774 (FMF.noSignedZeros() ||cannotBeNegativeZero(Op0,/*Depth=*/0, Q)))
5775return Op0;
5776
5777// fsub -0.0, (fsub -0.0, X) ==> X
5778// fsub -0.0, (fneg X) ==> X
5779Value *X;
5780if (canIgnoreSNaN(ExBehavior, FMF))
5781if (match(Op0,m_NegZeroFP()) &&match(Op1,m_FNeg(m_Value(X))))
5782returnX;
5783
5784// fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5785// fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5786if (canIgnoreSNaN(ExBehavior, FMF))
5787if (FMF.noSignedZeros() &&match(Op0,m_AnyZeroFP()) &&
5788 (match(Op1,m_FSub(m_AnyZeroFP(),m_Value(X))) ||
5789match(Op1,m_FNeg(m_Value(X)))))
5790returnX;
5791
5792if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5793returnnullptr;
5794
5795if (FMF.noNaNs()) {
5796// fsub nnan x, x ==> 0.0
5797if (Op0 == Op1)
5798returnConstant::getNullValue(Op0->getType());
5799
5800// With nnan: {+/-}Inf - X --> {+/-}Inf
5801if (match(Op0,m_Inf()))
5802return Op0;
5803
5804// With nnan: X - {+/-}Inf --> {-/+}Inf
5805if (match(Op1,m_Inf()))
5806returnfoldConstant(Instruction::FNeg, Op1, Q);
5807 }
5808
5809// Y - (Y - X) --> X
5810// (X + Y) - Y --> X
5811if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5812 (match(Op1,m_FSub(m_Specific(Op0),m_Value(X))) ||
5813match(Op0,m_c_FAdd(m_Specific(Op1),m_Value(X)))))
5814returnX;
5815
5816returnnullptr;
5817}
5818
5819staticValue *simplifyFMAFMul(Value *Op0,Value *Op1,FastMathFlags FMF,
5820constSimplifyQuery &Q,unsigned MaxRecurse,
5821fp::ExceptionBehavior ExBehavior,
5822RoundingMode Rounding) {
5823if (Constant *C =simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5824returnC;
5825
5826if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5827returnnullptr;
5828
5829// Canonicalize special constants as operand 1.
5830if (match(Op0,m_FPOne()) ||match(Op0,m_AnyZeroFP()))
5831std::swap(Op0, Op1);
5832
5833// X * 1.0 --> X
5834if (match(Op1,m_FPOne()))
5835return Op0;
5836
5837if (match(Op1,m_AnyZeroFP())) {
5838// X * 0.0 --> 0.0 (with nnan and nsz)
5839if (FMF.noNaNs() && FMF.noSignedZeros())
5840returnConstantFP::getZero(Op0->getType());
5841
5842KnownFPClass Known =
5843computeKnownFPClass(Op0, FMF,fcInf |fcNan,/*Depth=*/0, Q);
5844if (Known.isKnownNever(fcInf |fcNan)) {
5845// +normal number * (-)0.0 --> (-)0.0
5846if (Known.SignBit ==false)
5847return Op1;
5848// -normal number * (-)0.0 --> -(-)0.0
5849if (Known.SignBit ==true)
5850returnfoldConstant(Instruction::FNeg, Op1, Q);
5851 }
5852 }
5853
5854// sqrt(X) * sqrt(X) --> X, if we can:
5855// 1. Remove the intermediate rounding (reassociate).
5856// 2. Ignore non-zero negative numbers because sqrt would produce NaN.
5857// 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5858Value *X;
5859if (Op0 == Op1 &&match(Op0,m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5860 FMF.noNaNs() && FMF.noSignedZeros())
5861returnX;
5862
5863returnnullptr;
5864}
5865
5866/// Given operands for an FMul, see if we can fold the result. If not, this returns null.
5867staticValue *
5868simplifyFMulInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5869constSimplifyQuery &Q,unsigned MaxRecurse,
5870fp::ExceptionBehavior ExBehavior =fp::ebIgnore,
5871RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5872if (isDefaultFPEnvironment(ExBehavior, Rounding))
5873if (Constant *C =foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5874returnC;
5875
5876// Now apply simplifications that do not require rounding.
5877returnsimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5878}
5879
5880Value *llvm::simplifyFAddInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5881constSimplifyQuery &Q,
5882fp::ExceptionBehavior ExBehavior,
5883RoundingMode Rounding) {
5884 return ::simplifyFAddInst(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
5885 Rounding);
5886}
5887
5888Value *llvm::simplifyFSubInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5889constSimplifyQuery &Q,
5890fp::ExceptionBehavior ExBehavior,
5891RoundingMode Rounding) {
5892 return ::simplifyFSubInst(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
5893 Rounding);
5894}
5895
5896Value *llvm::simplifyFMulInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5897constSimplifyQuery &Q,
5898fp::ExceptionBehavior ExBehavior,
5899RoundingMode Rounding) {
5900 return ::simplifyFMulInst(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
5901 Rounding);
5902}
5903
5904Value *llvm::simplifyFMAFMul(Value *Op0,Value *Op1,FastMathFlags FMF,
5905constSimplifyQuery &Q,
5906fp::ExceptionBehavior ExBehavior,
5907RoundingMode Rounding) {
5908 return ::simplifyFMAFMul(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
5909 Rounding);
5910}
5911
5912staticValue *
5913simplifyFDivInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5914constSimplifyQuery &Q,unsigned,
5915fp::ExceptionBehavior ExBehavior =fp::ebIgnore,
5916RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5917if (isDefaultFPEnvironment(ExBehavior, Rounding))
5918if (Constant *C =foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5919returnC;
5920
5921if (Constant *C =simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5922returnC;
5923
5924if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5925returnnullptr;
5926
5927// X / 1.0 -> X
5928if (match(Op1,m_FPOne()))
5929return Op0;
5930
5931// 0 / X -> 0
5932// Requires that NaNs are off (X could be zero) and signed zeroes are
5933// ignored (X could be positive or negative, so the output sign is unknown).
5934if (FMF.noNaNs() && FMF.noSignedZeros() &&match(Op0,m_AnyZeroFP()))
5935returnConstantFP::getZero(Op0->getType());
5936
5937if (FMF.noNaNs()) {
5938// X / X -> 1.0 is legal when NaNs are ignored.
5939// We can ignore infinities because INF/INF is NaN.
5940if (Op0 == Op1)
5941return ConstantFP::get(Op0->getType(), 1.0);
5942
5943// (X * Y) / Y --> X if we can reassociate to the above form.
5944Value *X;
5945if (FMF.allowReassoc() &&match(Op0,m_c_FMul(m_Value(X),m_Specific(Op1))))
5946returnX;
5947
5948// -X / X -> -1.0 and
5949// X / -X -> -1.0 are legal when NaNs are ignored.
5950// We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5951if (match(Op0,m_FNegNSZ(m_Specific(Op1))) ||
5952match(Op1,m_FNegNSZ(m_Specific(Op0))))
5953return ConstantFP::get(Op0->getType(), -1.0);
5954
5955// nnan ninf X / [-]0.0 -> poison
5956if (FMF.noInfs() &&match(Op1,m_AnyZeroFP()))
5957returnPoisonValue::get(Op1->getType());
5958 }
5959
5960returnnullptr;
5961}
5962
5963Value *llvm::simplifyFDivInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5964constSimplifyQuery &Q,
5965fp::ExceptionBehavior ExBehavior,
5966RoundingMode Rounding) {
5967 return ::simplifyFDivInst(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
5968 Rounding);
5969}
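// Illustrative IR (exposition only):
//   %r = fdiv nnan double %x, %x   ; --> 1.0
// Infinities need not be excluded because inf/inf is NaN, which 'nnan'
// already rules out.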
5970
5971staticValue *
5972simplifyFRemInst(Value *Op0,Value *Op1,FastMathFlags FMF,
5973constSimplifyQuery &Q,unsigned,
5974fp::ExceptionBehavior ExBehavior =fp::ebIgnore,
5975RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5976if (isDefaultFPEnvironment(ExBehavior, Rounding))
5977if (Constant *C =foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5978returnC;
5979
5980if (Constant *C =simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5981returnC;
5982
5983if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5984returnnullptr;
5985
5986// Unlike fdiv, the result of frem always matches the sign of the dividend.
5987// The constant match may include undef elements in a vector, so return a full
5988// zero constant as the result.
5989if (FMF.noNaNs()) {
5990// +0 % X -> 0
5991if (match(Op0,m_PosZeroFP()))
5992returnConstantFP::getZero(Op0->getType());
5993// -0 % X -> -0
5994if (match(Op0,m_NegZeroFP()))
5995returnConstantFP::getNegativeZero(Op0->getType());
5996 }
5997
5998returnnullptr;
5999}
6000
6001Value *llvm::simplifyFRemInst(Value *Op0,Value *Op1,FastMathFlags FMF,
6002constSimplifyQuery &Q,
6003fp::ExceptionBehavior ExBehavior,
6004RoundingMode Rounding) {
6005 return ::simplifyFRemInst(Op0, Op1, FMF, Q,RecursionLimit, ExBehavior,
6006 Rounding);
6007}
6008
6009//=== Helper functions for higher up the class hierarchy.
6010
6011/// Given the operand for a UnaryOperator, see if we can fold the result.
6012/// If not, this returns null.
6013staticValue *simplifyUnOp(unsigned Opcode,Value *Op,constSimplifyQuery &Q,
6014unsigned MaxRecurse) {
6015switch (Opcode) {
6016case Instruction::FNeg:
6017returnsimplifyFNegInst(Op,FastMathFlags(), Q, MaxRecurse);
6018default:
6019llvm_unreachable("Unexpected opcode");
6020 }
6021}
6022
6023/// Given the operand for a UnaryOperator, see if we can fold the result.
6024/// If not, this returns null.
6025/// Try to use FastMathFlags when folding the result.
6026staticValue *simplifyFPUnOp(unsigned Opcode,Value *Op,
6027constFastMathFlags &FMF,constSimplifyQuery &Q,
6028unsigned MaxRecurse) {
6029switch (Opcode) {
6030case Instruction::FNeg:
6031returnsimplifyFNegInst(Op, FMF, Q, MaxRecurse);
6032default:
6033returnsimplifyUnOp(Opcode,Op, Q, MaxRecurse);
6034 }
6035}
6036
6037Value *llvm::simplifyUnOp(unsigned Opcode,Value *Op,constSimplifyQuery &Q) {
6038 return ::simplifyUnOp(Opcode,Op, Q,RecursionLimit);
6039}
6040
6041Value *llvm::simplifyUnOp(unsigned Opcode,Value *Op,FastMathFlags FMF,
6042constSimplifyQuery &Q) {
6043 return ::simplifyFPUnOp(Opcode,Op, FMF, Q,RecursionLimit);
6044}
6045
6046/// Given operands for a BinaryOperator, see if we can fold the result.
6047/// If not, this returns null.
6048staticValue *simplifyBinOp(unsigned Opcode,Value *LHS,Value *RHS,
6049constSimplifyQuery &Q,unsigned MaxRecurse) {
6050switch (Opcode) {
6051case Instruction::Add:
6052returnsimplifyAddInst(LHS,RHS,/* IsNSW */false,/* IsNUW */false, Q,
6053 MaxRecurse);
6054case Instruction::Sub:
6055returnsimplifySubInst(LHS,RHS,/* IsNSW */false,/* IsNUW */false, Q,
6056 MaxRecurse);
6057case Instruction::Mul:
6058returnsimplifyMulInst(LHS,RHS,/* IsNSW */false,/* IsNUW */false, Q,
6059 MaxRecurse);
6060case Instruction::SDiv:
6061returnsimplifySDivInst(LHS,RHS,/* IsExact */false, Q, MaxRecurse);
6062case Instruction::UDiv:
6063returnsimplifyUDivInst(LHS,RHS,/* IsExact */false, Q, MaxRecurse);
6064case Instruction::SRem:
6065returnsimplifySRemInst(LHS,RHS, Q, MaxRecurse);
6066case Instruction::URem:
6067returnsimplifyURemInst(LHS,RHS, Q, MaxRecurse);
6068case Instruction::Shl:
6069returnsimplifyShlInst(LHS,RHS,/* IsNSW */false,/* IsNUW */false, Q,
6070 MaxRecurse);
6071case Instruction::LShr:
6072returnsimplifyLShrInst(LHS,RHS,/* IsExact */false, Q, MaxRecurse);
6073case Instruction::AShr:
6074returnsimplifyAShrInst(LHS,RHS,/* IsExact */false, Q, MaxRecurse);
6075case Instruction::And:
6076returnsimplifyAndInst(LHS,RHS, Q, MaxRecurse);
6077case Instruction::Or:
6078returnsimplifyOrInst(LHS,RHS, Q, MaxRecurse);
6079case Instruction::Xor:
6080returnsimplifyXorInst(LHS,RHS, Q, MaxRecurse);
6081case Instruction::FAdd:
6082returnsimplifyFAddInst(LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6083case Instruction::FSub:
6084returnsimplifyFSubInst(LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6085case Instruction::FMul:
6086returnsimplifyFMulInst(LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6087case Instruction::FDiv:
6088returnsimplifyFDivInst(LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6089case Instruction::FRem:
6090returnsimplifyFRemInst(LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6091default:
6092llvm_unreachable("Unexpected opcode");
6093 }
6094}
6095
6096/// Given operands for a BinaryOperator, see if we can fold the result.
6097/// If not, this returns null.
6098/// Try to use FastMathFlags when folding the result.
6099staticValue *simplifyBinOp(unsigned Opcode,Value *LHS,Value *RHS,
6100constFastMathFlags &FMF,constSimplifyQuery &Q,
6101unsigned MaxRecurse) {
6102switch (Opcode) {
6103case Instruction::FAdd:
6104returnsimplifyFAddInst(LHS,RHS, FMF, Q, MaxRecurse);
6105case Instruction::FSub:
6106returnsimplifyFSubInst(LHS,RHS, FMF, Q, MaxRecurse);
6107case Instruction::FMul:
6108returnsimplifyFMulInst(LHS,RHS, FMF, Q, MaxRecurse);
6109case Instruction::FDiv:
6110returnsimplifyFDivInst(LHS,RHS, FMF, Q, MaxRecurse);
6111default:
6112returnsimplifyBinOp(Opcode,LHS,RHS, Q, MaxRecurse);
6113 }
6114}
6115
6116Value *llvm::simplifyBinOp(unsigned Opcode,Value *LHS,Value *RHS,
6117constSimplifyQuery &Q) {
6118 return ::simplifyBinOp(Opcode,LHS,RHS, Q,RecursionLimit);
6119}
6120
6121Value *llvm::simplifyBinOp(unsigned Opcode,Value *LHS,Value *RHS,
6122FastMathFlags FMF,constSimplifyQuery &Q) {
6123 return ::simplifyBinOp(Opcode,LHS,RHS, FMF, Q,RecursionLimit);
6124}
6125
6126/// Given operands for a CmpInst, see if we can fold the result.
6127staticValue *simplifyCmpInst(CmpPredicatePredicate,Value *LHS,Value *RHS,
6128constSimplifyQuery &Q,unsigned MaxRecurse) {
6129if (CmpInst::isIntPredicate(Predicate))
6130returnsimplifyICmpInst(Predicate,LHS,RHS, Q, MaxRecurse);
6131returnsimplifyFCmpInst(Predicate,LHS,RHS,FastMathFlags(), Q, MaxRecurse);
6132}
6133
6134Value *llvm::simplifyCmpInst(CmpPredicate Predicate,Value *LHS,Value *RHS,
6135constSimplifyQuery &Q) {
6136 return ::simplifyCmpInst(Predicate,LHS,RHS, Q,RecursionLimit);
6137}
6138
6139staticboolisIdempotent(Intrinsic::IDID) {
6140switch (ID) {
6141default:
6142returnfalse;
6143
6144// Unary idempotent: f(f(x)) = f(x)
6145case Intrinsic::fabs:
6146case Intrinsic::floor:
6147case Intrinsic::ceil:
6148case Intrinsic::trunc:
6149case Intrinsic::rint:
6150case Intrinsic::nearbyint:
6151case Intrinsic::round:
6152case Intrinsic::roundeven:
6153case Intrinsic::canonicalize:
6154case Intrinsic::arithmetic_fence:
6155returntrue;
6156 }
6157}
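// Illustrative IR (exposition only; the fold itself is applied in
// simplifyUnaryIntrinsic below):
//   %a = call float @llvm.fabs.f32(float %x)
//   %b = call float @llvm.fabs.f32(float %a)   ; --> %a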
6158
6159/// Return true if the intrinsic rounds a floating-point value to an integral
6160/// floating-point value (not an integer type).
6161staticboolremovesFPFraction(Intrinsic::IDID) {
6162switch (ID) {
6163default:
6164returnfalse;
6165
6166case Intrinsic::floor:
6167case Intrinsic::ceil:
6168case Intrinsic::trunc:
6169case Intrinsic::rint:
6170case Intrinsic::nearbyint:
6171case Intrinsic::round:
6172case Intrinsic::roundeven:
6173returntrue;
6174 }
6175}
6176
6177staticValue *simplifyRelativeLoad(Constant *Ptr,Constant *Offset,
6178constDataLayout &DL) {
6179GlobalValue *PtrSym;
6180APInt PtrOffset;
6181if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset,DL))
6182returnnullptr;
6183
6184Type *Int32Ty =Type::getInt32Ty(Ptr->getContext());
6185
6186auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6187if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6188returnnullptr;
6189
6190APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6191DL.getIndexTypeSizeInBits(Ptr->getType()));
6192if (OffsetInt.srem(4) != 0)
6193returnnullptr;
6194
6195Constant *Loaded =
6196ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt),DL);
6197if (!Loaded)
6198returnnullptr;
6199
6200auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6201if (!LoadedCE)
6202returnnullptr;
6203
6204if (LoadedCE->getOpcode() == Instruction::Trunc) {
6205 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6206if (!LoadedCE)
6207returnnullptr;
6208 }
6209
6210if (LoadedCE->getOpcode() != Instruction::Sub)
6211returnnullptr;
6212
6213auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6214if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6215returnnullptr;
6216auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6217
6218Constant *LoadedRHS = LoadedCE->getOperand(1);
6219GlobalValue *LoadedRHSSym;
6220APInt LoadedRHSOffset;
6221if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6222DL) ||
6223 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6224returnnullptr;
6225
6226return LoadedLHSPtr;
6227}
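// Sketch of the pattern recognized above (hedged; @f and @slot are illustrative
// names): a relative pointer stored at @slot holding
// "trunc(ptrtoint(@f) - ptrtoint(@slot))", as emitted for relative vtables,
// lets llvm.load.relative(@slot, 0) fold directly to @f.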
6228
6229// TODO: Need to pass in FastMathFlags
6230staticValue *simplifyLdexp(Value *Op0,Value *Op1,constSimplifyQuery &Q,
6231bool IsStrict) {
6232// ldexp(poison, x) -> poison
6233// ldexp(x, poison) -> poison
6234if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6235return Op0;
6236
6237// ldexp(undef, x) -> nan
6238if (Q.isUndefValue(Op0))
6239returnConstantFP::getNaN(Op0->getType());
6240
6241if (!IsStrict) {
6242// TODO: Could insert a canonicalize for strict
6243
6244// ldexp(x, undef) -> x
6245if (Q.isUndefValue(Op1))
6246return Op0;
6247 }
6248
6249constAPFloat *C =nullptr;
6250match(Op0,PatternMatch::m_APFloat(C));
6251
6252// These cases should be safe, even with strictfp.
6253// ldexp(0.0, x) -> 0.0
6254// ldexp(-0.0, x) -> -0.0
6255// ldexp(inf, x) -> inf
6256// ldexp(-inf, x) -> -inf
6257if (C && (C->isZero() ||C->isInfinity()))
6258return Op0;
6259
6260// These folds drop a canonicalization; we could do them if we knew how to
6261// ignore denormal flushes and target handling of NaN payload bits.
6262if (IsStrict)
6263returnnullptr;
6264
6265// TODO: Could quiet this with strictfp if the exception mode isn't strict.
6266if (C &&C->isNaN())
6267return ConstantFP::get(Op0->getType(),C->makeQuiet());
6268
6269// ldexp(x, 0) -> x
6270
6271// TODO: Could fold this if we know the exception mode isn't
6272// strict, we know the denormal mode and other target modes.
6273if (match(Op1,PatternMatch::m_ZeroInt()))
6274return Op0;
6275
6276returnnullptr;
6277}
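// For example (illustrative IR), the exponent-zero case above fires on:
//   %r = call float @llvm.ldexp.f32.i32(float %x, i32 0)   ; %r simplifies to %x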
6278
6279staticValue *simplifyUnaryIntrinsic(Function *F,Value *Op0,
6280constSimplifyQuery &Q,
6281constCallBase *Call) {
6282// Idempotent functions return the same result when called repeatedly.
6283Intrinsic::ID IID =F->getIntrinsicID();
6284if (isIdempotent(IID))
6285if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6286if (II->getIntrinsicID() == IID)
6287returnII;
6288
6289if (removesFPFraction(IID)) {
6290// Converting from int or calling a rounding function always results in a
6291// finite integral number or infinity. For those inputs, rounding functions
6292// always return the same value, so the (2nd) rounding is eliminated. Ex:
6293// floor (sitofp x) -> sitofp x
6294// round (ceil x) -> ceil x
6295auto *II = dyn_cast<IntrinsicInst>(Op0);
6296if ((II &&removesFPFraction(II->getIntrinsicID())) ||
6297match(Op0,m_SIToFP(m_Value())) ||match(Op0,m_UIToFP(m_Value())))
6298return Op0;
6299 }
6300
6301Value *X;
6302switch (IID) {
6303case Intrinsic::fabs:
6304if (computeKnownFPSignBit(Op0,/*Depth=*/0, Q) ==false)
6305return Op0;
6306break;
6307case Intrinsic::bswap:
6308// bswap(bswap(x)) -> x
6309if (match(Op0,m_BSwap(m_Value(X))))
6310returnX;
6311break;
6312case Intrinsic::bitreverse:
6313// bitreverse(bitreverse(x)) -> x
6314if (match(Op0,m_BitReverse(m_Value(X))))
6315returnX;
6316break;
6317case Intrinsic::ctpop: {
6318// ctpop(X) -> 1 iff X is non-zero power of 2.
6319if (isKnownToBeAPowerOfTwo(Op0, Q.DL,/*OrZero*/false, 0, Q.AC, Q.CxtI,
6320 Q.DT))
6321return ConstantInt::get(Op0->getType(), 1);
6322// If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6323// ctpop(and X, 1) --> and X, 1
6324unsignedBitWidth = Op0->getType()->getScalarSizeInBits();
6325if (MaskedValueIsZero(Op0,APInt::getHighBitsSet(BitWidth,BitWidth - 1),
6326 Q))
6327return Op0;
6328break;
6329 }
6330case Intrinsic::exp:
6331// exp(log(x)) -> x
6332if (Call->hasAllowReassoc() &&
6333match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6334returnX;
6335break;
6336case Intrinsic::exp2:
6337// exp2(log2(x)) -> x
6338if (Call->hasAllowReassoc() &&
6339match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6340returnX;
6341break;
6342case Intrinsic::exp10:
6343// exp10(log10(x)) -> x
6344if (Call->hasAllowReassoc() &&
6345match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6346returnX;
6347break;
6348case Intrinsic::log:
6349// log(exp(x)) -> x
6350if (Call->hasAllowReassoc() &&
6351match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6352returnX;
6353break;
6354case Intrinsic::log2:
6355// log2(exp2(x)) -> x
6356if (Call->hasAllowReassoc() &&
6357 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6358match(Op0,
6359 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0),m_Value(X)))))
6360returnX;
6361break;
6362case Intrinsic::log10:
6363// log10(pow(10.0, x)) -> x
6364// log10(exp10(x)) -> x
6365if (Call->hasAllowReassoc() &&
6366 (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6367match(Op0,
6368 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0),m_Value(X)))))
6369returnX;
6370break;
6371case Intrinsic::vector_reverse:
6372// vector.reverse(vector.reverse(x)) -> x
6373if (match(Op0,m_VecReverse(m_Value(X))))
6374returnX;
6375// vector.reverse(splat(X)) -> splat(X)
6376if (isSplatValue(Op0))
6377return Op0;
6378break;
6379case Intrinsic::frexp: {
6380// Frexp is idempotent with the added complication of the struct return.
6381if (match(Op0, m_ExtractValue<0>(m_Value(X)))) {
6382if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value())))
6383returnX;
6384 }
6385
6386break;
6387 }
6388default:
6389break;
6390 }
6391
6392returnnullptr;
6393}
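// For example (illustrative IR), the exp/log inverse folds above require the
// reassoc fast-math flag on the outer call:
//   %l = call float @llvm.log.f32(float %x)
//   %e = call reassoc float @llvm.exp.f32(float %l)   ; %e simplifies to %x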
6394
6395/// Given a min/max intrinsic, see if it can be removed based on having an
6396/// operand that is another min/max intrinsic with shared operand(s). The caller
6397/// is expected to swap the operand arguments to handle commutation.
6398staticValue *foldMinMaxSharedOp(Intrinsic::ID IID,Value *Op0,Value *Op1) {
6399Value *X, *Y;
6400if (!match(Op0,m_MaxOrMin(m_Value(X),m_Value(Y))))
6401returnnullptr;
6402
6403auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6404if (!MM0)
6405returnnullptr;
6406Intrinsic::ID IID0 = MM0->getIntrinsicID();
6407
6408if (Op1 ==X || Op1 ==Y ||
6409match(Op1,m_c_MaxOrMin(m_Specific(X),m_Specific(Y)))) {
6410// max (max X, Y), X --> max X, Y
6411if (IID0 == IID)
6412return MM0;
6413// max (min X, Y), X --> X
6414if (IID0 ==getInverseMinMaxIntrinsic(IID))
6415return Op1;
6416 }
6417returnnullptr;
6418}
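// For example (illustrative IR, placeholder names):
//   %m = call i32 @llvm.umax.i32(i32 %x, i32 %y)
//   %a = call i32 @llvm.umax.i32(i32 %m, i32 %x)   ; %a simplifies to %m
//   %b = call i32 @llvm.umin.i32(i32 %m, i32 %x)   ; %b simplifies to %x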
6419
6420/// Given a min/max intrinsic, see if it can be removed based on having an
6421/// operand that is another min/max intrinsic with shared operand(s). The caller
6422/// is expected to swap the operand arguments to handle commutation.
6423staticValue *foldMinimumMaximumSharedOp(Intrinsic::ID IID,Value *Op0,
6424Value *Op1) {
6425assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6426 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6427"Unsupported intrinsic");
6428
6429auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6430// If Op0 is not the same intrinsic as IID, do not process it.
6431// This differs from the integer min/max handling: we do not handle cases
6432// like max(min(X,Y),min(X,Y)) => min(X,Y) here, but GVN can.
6433if (!M0 ||M0->getIntrinsicID() != IID)
6434returnnullptr;
6435Value *X0 =M0->getOperand(0);
6436Value *Y0 =M0->getOperand(1);
6437// Simple case, m(m(X,Y), X) => m(X, Y)
6438// m(m(X,Y), Y) => m(X, Y)
6439// For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6440// For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6441// For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6442// For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6443if (X0 == Op1 || Y0 == Op1)
6444returnM0;
6445
6446auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6447if (!M1)
6448returnnullptr;
6449Value *X1 =M1->getOperand(0);
6450Value *Y1 =M1->getOperand(1);
6451Intrinsic::ID IID1 =M1->getIntrinsicID();
6452// We have the case m(m(X,Y), m'(X,Y)), taking into account that m' is commutative.
6453// If m' is m or the inversion of m, then m(m(X,Y), m'(X,Y)) == m(X,Y).
6454// For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6455// For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6456// For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6457// For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6458if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6459if (IID1 == IID ||getInverseMinMaxIntrinsic(IID1) == IID)
6460returnM0;
6461
6462returnnullptr;
6463}
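// For example (illustrative IR), the shared-operand fold for the FP variants
// keeps the inner call:
//   %m = call float @llvm.maxnum.f32(float %x, float %y)
//   %n = call float @llvm.maxnum.f32(float %m, float %x)   ; %n simplifies to %m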
6464
6465Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID,Type *ReturnType,
6466Value *Op0,Value *Op1,
6467constSimplifyQuery &Q,
6468constCallBase *Call) {
6469unsignedBitWidth = ReturnType->getScalarSizeInBits();
6470switch (IID) {
6471case Intrinsic::abs:
6472// abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6473// It is always ok to pick the earlier abs. We'll just lose nsw if its only
6474// on the outer abs.
6475if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(),m_Value())))
6476return Op0;
6477break;
6478
6479case Intrinsic::cttz: {
6480Value *X;
6481if (match(Op0,m_Shl(m_One(),m_Value(X))))
6482returnX;
6483break;
6484 }
6485case Intrinsic::ctlz: {
6486Value *X;
6487if (match(Op0,m_LShr(m_Negative(),m_Value(X))))
6488returnX;
6489if (match(Op0,m_AShr(m_Negative(),m_Value())))
6490returnConstant::getNullValue(ReturnType);
6491break;
6492 }
6493case Intrinsic::ptrmask: {
6494if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6495returnPoisonValue::get(Op0->getType());
6496
6497// NOTE: We can't apply these simplifications based on the value of Op1
6498// because we need to preserve provenance.
6499if (Q.isUndefValue(Op0) ||match(Op0,m_Zero()))
6500returnConstant::getNullValue(Op0->getType());
6501
6502assert(Op1->getType()->getScalarSizeInBits() ==
6503 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6504"Invalid mask width");
6505// If the index width (mask size) is less than the pointer size, then the
6506// mask is 1-extended.
6507if (match(Op1,m_PtrToInt(m_Specific(Op0))))
6508return Op0;
6509
6510// NOTE: We may have attributes associated with the return value of the
6511// llvm.ptrmask intrinsic that will be lost when we just return the
6512// operand. We should try to preserve them.
6513if (match(Op1,m_AllOnes()) || Q.isUndefValue(Op1))
6514return Op0;
6515
6516Constant *C;
6517if (match(Op1,m_ImmConstant(C))) {
6518KnownBits PtrKnown =computeKnownBits(Op0,/*Depth=*/0, Q);
6519// See if we are only masking off bits we know are already zero due to
6520// alignment.
6521APInt IrrelevantPtrBits =
6522 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6523C =ConstantFoldBinaryOpOperands(
6524 Instruction::Or,C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6525 Q.DL);
6526if (C !=nullptr &&C->isAllOnesValue())
6527return Op0;
6528 }
6529break;
6530 }
6531case Intrinsic::smax:
6532case Intrinsic::smin:
6533case Intrinsic::umax:
6534case Intrinsic::umin: {
6535// If the arguments are the same, this is a no-op.
6536if (Op0 == Op1)
6537return Op0;
6538
6539// Canonicalize immediate constant operand as Op1.
6540if (match(Op0,m_ImmConstant()))
6541std::swap(Op0, Op1);
6542
6543// Assume undef is the limit value.
6544if (Q.isUndefValue(Op1))
6545return ConstantInt::get(
6546 ReturnType,MinMaxIntrinsic::getSaturationPoint(IID,BitWidth));
6547
6548constAPInt *C;
6549if (match(Op1,m_APIntAllowPoison(C))) {
6550// Clamp to limit value. For example:
6551// umax(i8 %x, i8 255) --> 255
6552if (*C ==MinMaxIntrinsic::getSaturationPoint(IID,BitWidth))
6553return ConstantInt::get(ReturnType, *C);
6554
6555// If the constant op is the opposite of the limit value, the other must
6556// be larger/smaller or equal. For example:
6557// umin(i8 %x, i8 255) --> %x
6558if (*C ==MinMaxIntrinsic::getSaturationPoint(
6559getInverseMinMaxIntrinsic(IID),BitWidth))
6560return Op0;
6561
6562// Remove nested call if constant operands allow it. Example:
6563// max (max X, 7), 5 -> max X, 7
6564auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6565if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6566// TODO: loosen undef/splat restrictions for vector constants.
6567Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6568constAPInt *InnerC;
6569if ((match(M00,m_APInt(InnerC)) ||match(M01,m_APInt(InnerC))) &&
6570ICmpInst::compare(*InnerC, *C,
6571 ICmpInst::getNonStrictPredicate(
6572MinMaxIntrinsic::getPredicate(IID))))
6573return Op0;
6574 }
6575 }
6576
6577if (Value *V =foldMinMaxSharedOp(IID, Op0, Op1))
6578return V;
6579if (Value *V =foldMinMaxSharedOp(IID, Op1, Op0))
6580return V;
6581
6582ICmpInst::Predicate Pred =
6583 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6584if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(),RecursionLimit))
6585return Op0;
6586if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(),RecursionLimit))
6587return Op1;
6588
6589break;
6590 }
6591case Intrinsic::scmp:
6592case Intrinsic::ucmp: {
6593// Fold to a constant if the relationship between operands can be
6594// established with certainty
6595if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q,RecursionLimit))
6596returnConstant::getNullValue(ReturnType);
6597
6598ICmpInst::Predicate PredGT =
6599 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6600if (isICmpTrue(PredGT, Op0, Op1, Q,RecursionLimit))
6601return ConstantInt::get(ReturnType, 1);
6602
6603ICmpInst::Predicate PredLT =
6604 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6605if (isICmpTrue(PredLT, Op0, Op1, Q,RecursionLimit))
6606returnConstantInt::getSigned(ReturnType, -1);
6607
6608break;
6609 }
6610case Intrinsic::usub_with_overflow:
6611case Intrinsic::ssub_with_overflow:
6612// X - X -> { 0, false }
6613// X - undef -> { 0, false }
6614// undef - X -> { 0, false }
6615if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6616returnConstant::getNullValue(ReturnType);
6617break;
6618case Intrinsic::uadd_with_overflow:
6619case Intrinsic::sadd_with_overflow:
6620// X + undef -> { -1, false }
6621// undef + x -> { -1, false }
6622if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6623returnConstantStruct::get(
6624 cast<StructType>(ReturnType),
6625 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6626Constant::getNullValue(ReturnType->getStructElementType(1))});
6627 }
6628break;
6629case Intrinsic::umul_with_overflow:
6630case Intrinsic::smul_with_overflow:
6631// 0 * X -> { 0, false }
6632// X * 0 -> { 0, false }
6633if (match(Op0,m_Zero()) ||match(Op1,m_Zero()))
6634returnConstant::getNullValue(ReturnType);
6635// undef * X -> { 0, false }
6636// X * undef -> { 0, false }
6637if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6638returnConstant::getNullValue(ReturnType);
6639break;
6640case Intrinsic::uadd_sat:
6641// sat(MAX + X) -> MAX
6642// sat(X + MAX) -> MAX
6643if (match(Op0,m_AllOnes()) ||match(Op1,m_AllOnes()))
6644returnConstant::getAllOnesValue(ReturnType);
6645 [[fallthrough]];
6646case Intrinsic::sadd_sat:
6647// sat(X + undef) -> -1
6648// sat(undef + X) -> -1
6649// For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6650// For signed: Assume undef is ~X, in which case X + ~X = -1.
6651if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6652returnConstant::getAllOnesValue(ReturnType);
6653
6654// X + 0 -> X
6655if (match(Op1,m_Zero()))
6656return Op0;
6657// 0 + X -> X
6658if (match(Op0,m_Zero()))
6659return Op1;
6660break;
6661case Intrinsic::usub_sat:
6662// sat(0 - X) -> 0, sat(X - MAX) -> 0
6663if (match(Op0,m_Zero()) ||match(Op1,m_AllOnes()))
6664returnConstant::getNullValue(ReturnType);
6665 [[fallthrough]];
6666case Intrinsic::ssub_sat:
6667// X - X -> 0, X - undef -> 0, undef - X -> 0
6668if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6669returnConstant::getNullValue(ReturnType);
6670// X - 0 -> X
6671if (match(Op1,m_Zero()))
6672return Op0;
6673break;
6674case Intrinsic::load_relative:
6675if (auto *C0 = dyn_cast<Constant>(Op0))
6676if (auto *C1 = dyn_cast<Constant>(Op1))
6677returnsimplifyRelativeLoad(C0, C1, Q.DL);
6678break;
6679case Intrinsic::powi:
6680if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6681// powi(x, 0) -> 1.0
6682if (Power->isZero())
6683return ConstantFP::get(Op0->getType(), 1.0);
6684// powi(x, 1) -> x
6685if (Power->isOne())
6686return Op0;
6687 }
6688break;
6689case Intrinsic::ldexp:
6690returnsimplifyLdexp(Op0, Op1, Q,false);
6691case Intrinsic::copysign:
6692// copysign X, X --> X
6693if (Op0 == Op1)
6694return Op0;
6695// copysign -X, X --> X
6696// copysign X, -X --> -X
6697if (match(Op0,m_FNeg(m_Specific(Op1))) ||
6698match(Op1,m_FNeg(m_Specific(Op0))))
6699return Op1;
6700break;
6701case Intrinsic::is_fpclass: {
6702if (isa<PoisonValue>(Op0))
6703returnPoisonValue::get(ReturnType);
6704
6705uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6706// If all tests are made, it doesn't matter what the value is.
6707if ((Mask &fcAllFlags) ==fcAllFlags)
6708return ConstantInt::get(ReturnType,true);
6709if ((Mask &fcAllFlags) == 0)
6710return ConstantInt::get(ReturnType,false);
6711if (Q.isUndefValue(Op0))
6712returnUndefValue::get(ReturnType);
6713break;
6714 }
6715case Intrinsic::maxnum:
6716case Intrinsic::minnum:
6717case Intrinsic::maximum:
6718case Intrinsic::minimum: {
6719// If the arguments are the same, this is a no-op.
6720if (Op0 == Op1)
6721return Op0;
6722
6723// Canonicalize constant operand as Op1.
6724if (isa<Constant>(Op0))
6725std::swap(Op0, Op1);
6726
6727// If an argument is undef, return the other argument.
6728if (Q.isUndefValue(Op1))
6729return Op0;
6730
6731bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6732bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6733
6734// minnum(X, nan) -> X
6735// maxnum(X, nan) -> X
6736// minimum(X, nan) -> nan
6737// maximum(X, nan) -> nan
6738if (match(Op1,m_NaN()))
6739return PropagateNaN ?propagateNaN(cast<Constant>(Op1)) : Op0;
6740
6741// In the following folds, inf can be replaced with the largest finite
6742// float, if the ninf flag is set.
6743constAPFloat *C;
6744if (match(Op1,m_APFloat(C)) &&
6745 (C->isInfinity() || (Call && Call->hasNoInfs() &&C->isLargest()))) {
6746// minnum(X, -inf) -> -inf
6747// maxnum(X, +inf) -> +inf
6748// minimum(X, -inf) -> -inf if nnan
6749// maximum(X, +inf) -> +inf if nnan
6750if (C->isNegative() == IsMin &&
6751 (!PropagateNaN || (Call && Call->hasNoNaNs())))
6752return ConstantFP::get(ReturnType, *C);
6753
6754// minnum(X, +inf) -> X if nnan
6755// maxnum(X, -inf) -> X if nnan
6756// minimum(X, +inf) -> X
6757// maximum(X, -inf) -> X
6758if (C->isNegative() != IsMin &&
6759 (PropagateNaN || (Call && Call->hasNoNaNs())))
6760return Op0;
6761 }
6762
6763// Min/max of the same operation with common operand:
6764// m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6765if (Value *V =foldMinimumMaximumSharedOp(IID, Op0, Op1))
6766return V;
6767if (Value *V =foldMinimumMaximumSharedOp(IID, Op1, Op0))
6768return V;
6769
6770break;
6771 }
6772case Intrinsic::vector_extract: {
6773// (extract_vector (insert_vector _, X, 0), 0) -> X
6774unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6775Value *X =nullptr;
6776if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(),m_Value(X),
6777m_Zero())) &&
6778 IdxN == 0 &&X->getType() == ReturnType)
6779returnX;
6780
6781break;
6782 }
6783default:
6784break;
6785 }
6786
6787returnnullptr;
6788}
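// A few illustrative IR instances of the binary-intrinsic folds above
// (placeholder names):
//   %s = shl i32 1, %x
//   %c = call i32 @llvm.cttz.i32(i32 %s, i1 false)   ; %c simplifies to %x
//   %u = call i8 @llvm.umax.i8(i8 %y, i8 255)        ; %u simplifies to 255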
6789
6790staticValue *simplifyIntrinsic(CallBase *Call,Value *Callee,
6791ArrayRef<Value *> Args,
6792constSimplifyQuery &Q) {
6793// Operand bundles should not be in Args.
6794assert(Call->arg_size() == Args.size());
6795unsigned NumOperands = Args.size();
6796Function *F = cast<Function>(Callee);
6797Intrinsic::ID IID =F->getIntrinsicID();
6798
6799// Most of the intrinsics with no operands have some kind of side effect.
6800// Don't simplify.
6801if (!NumOperands) {
6802switch (IID) {
6803case Intrinsic::vscale: {
6804Type *RetTy =F->getReturnType();
6805ConstantRange CR =getVScaleRange(Call->getFunction(), 64);
6806if (constAPInt *C = CR.getSingleElement())
6807return ConstantInt::get(RetTy,C->getZExtValue());
6808returnnullptr;
6809 }
6810default:
6811returnnullptr;
6812 }
6813 }
6814
6815if (NumOperands == 1)
6816returnsimplifyUnaryIntrinsic(F, Args[0], Q, Call);
6817
6818if (NumOperands == 2)
6819returnsimplifyBinaryIntrinsic(IID,F->getReturnType(), Args[0], Args[1], Q,
6820 Call);
6821
6822// Handle intrinsics with 3 or more arguments.
6823switch (IID) {
6824case Intrinsic::masked_load:
6825case Intrinsic::masked_gather: {
6826Value *MaskArg = Args[2];
6827Value *PassthruArg = Args[3];
6828// If the mask is all zeros or undef, the "passthru" argument is the result.
6829if (maskIsAllZeroOrUndef(MaskArg))
6830return PassthruArg;
6831returnnullptr;
6832 }
6833case Intrinsic::fshl:
6834case Intrinsic::fshr: {
6835Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6836
6837// If both operands are undef, the result is undef.
6838if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6839returnUndefValue::get(F->getReturnType());
6840
6841// If shift amount is undef, assume it is zero.
6842if (Q.isUndefValue(ShAmtArg))
6843return Args[IID == Intrinsic::fshl ? 0 : 1];
6844
6845constAPInt *ShAmtC;
6846if (match(ShAmtArg,m_APInt(ShAmtC))) {
6847// If there's effectively no shift, return the 1st arg or 2nd arg.
6848APIntBitWidth =APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6849if (ShAmtC->urem(BitWidth).isZero())
6850return Args[IID == Intrinsic::fshl ? 0 : 1];
6851 }
6852
6853// Rotating zero by anything is zero.
6854if (match(Op0,m_Zero()) &&match(Op1,m_Zero()))
6855return ConstantInt::getNullValue(F->getReturnType());
6856
6857// Rotating -1 by anything is -1.
6858if (match(Op0,m_AllOnes()) &&match(Op1,m_AllOnes()))
6859return ConstantInt::getAllOnesValue(F->getReturnType());
6860
6861returnnullptr;
6862 }
6863case Intrinsic::experimental_constrained_fma: {
6864auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6865if (Value *V =simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
6866 *FPI->getRoundingMode()))
6867return V;
6868returnnullptr;
6869 }
6870case Intrinsic::fma:
6871case Intrinsic::fmuladd: {
6872if (Value *V =simplifyFPOp(Args, {}, Q,fp::ebIgnore,
6873 RoundingMode::NearestTiesToEven))
6874return V;
6875returnnullptr;
6876 }
6877case Intrinsic::smul_fix:
6878case Intrinsic::smul_fix_sat: {
6879Value *Op0 = Args[0];
6880Value *Op1 = Args[1];
6881Value *Op2 = Args[2];
6882Type *ReturnType =F->getReturnType();
6883
6884// Canonicalize constant operand as Op1 (ConstantFolding handles the case
6885// when both Op0 and Op1 are constant so we do not care about that special
6886// case here).
6887if (isa<Constant>(Op0))
6888std::swap(Op0, Op1);
6889
6890// X * 0 -> 0
6891if (match(Op1,m_Zero()))
6892returnConstant::getNullValue(ReturnType);
6893
6894// X * undef -> 0
6895if (Q.isUndefValue(Op1))
6896returnConstant::getNullValue(ReturnType);
6897
6898// X * (1 << Scale) -> X
6899APInt ScaledOne =
6900APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6901 cast<ConstantInt>(Op2)->getZExtValue());
6902if (ScaledOne.isNonNegative() &&match(Op1,m_SpecificInt(ScaledOne)))
6903return Op0;
6904
6905returnnullptr;
6906 }
6907case Intrinsic::vector_insert: {
6908Value *Vec = Args[0];
6909Value *SubVec = Args[1];
6910Value *Idx = Args[2];
6911Type *ReturnType =F->getReturnType();
6912
6913// (insert_vector Y, (extract_vector X, 0), 0) -> X
6914// where: Y is X, or Y is undef
6915unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6916Value *X =nullptr;
6917if (match(SubVec,
6918 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X),m_Zero())) &&
6919 (Q.isUndefValue(Vec) || Vec ==X) && IdxN == 0 &&
6920X->getType() == ReturnType)
6921returnX;
6922
6923returnnullptr;
6924 }
6925case Intrinsic::experimental_constrained_fadd: {
6926auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6927returnsimplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6928 *FPI->getExceptionBehavior(),
6929 *FPI->getRoundingMode());
6930 }
6931case Intrinsic::experimental_constrained_fsub: {
6932auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6933returnsimplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6934 *FPI->getExceptionBehavior(),
6935 *FPI->getRoundingMode());
6936 }
6937case Intrinsic::experimental_constrained_fmul: {
6938auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6939returnsimplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6940 *FPI->getExceptionBehavior(),
6941 *FPI->getRoundingMode());
6942 }
6943case Intrinsic::experimental_constrained_fdiv: {
6944auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6945returnsimplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6946 *FPI->getExceptionBehavior(),
6947 *FPI->getRoundingMode());
6948 }
6949case Intrinsic::experimental_constrained_frem: {
6950auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6951returnsimplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6952 *FPI->getExceptionBehavior(),
6953 *FPI->getRoundingMode());
6954 }
6955case Intrinsic::experimental_constrained_ldexp:
6956returnsimplifyLdexp(Args[0], Args[1], Q,true);
6957case Intrinsic::experimental_gc_relocate: {
6958GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
6959Value *DerivedPtr = GCR.getDerivedPtr();
6960Value *BasePtr = GCR.getBasePtr();
6961
6962// Undef is undef, even after relocation.
6963if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6964returnUndefValue::get(GCR.getType());
6965 }
6966
6967if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
6968// For now, the assumption is that the relocation of null will be null
6969// for most any collector. If this ever changes, a corresponding hook
6970// should be added to GCStrategy and this code should check it first.
6971if (isa<ConstantPointerNull>(DerivedPtr)) {
6972// Use null-pointer of gc_relocate's type to replace it.
6973returnConstantPointerNull::get(PT);
6974 }
6975 }
6976returnnullptr;
6977 }
6978default:
6979returnnullptr;
6980 }
6981}
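// For example (illustrative IR), the funnel-shift handling above treats a shift
// amount that is a multiple of the bit width as a no-op:
//   %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 32)   ; %r simplifies to %a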
6982
6983staticValue *tryConstantFoldCall(CallBase *Call,Value *Callee,
6984ArrayRef<Value *> Args,
6985constSimplifyQuery &Q) {
6986auto *F = dyn_cast<Function>(Callee);
6987if (!F || !canConstantFoldCallTo(Call,F))
6988returnnullptr;
6989
6990SmallVector<Constant *, 4> ConstantArgs;
6991 ConstantArgs.reserve(Args.size());
6992for (Value *Arg : Args) {
6993Constant *C = dyn_cast<Constant>(Arg);
6994if (!C) {
6995if (isa<MetadataAsValue>(Arg))
6996continue;
6997returnnullptr;
6998 }
6999 ConstantArgs.push_back(C);
7000 }
7001
7002returnConstantFoldCall(Call,F, ConstantArgs, Q.TLI);
7003}
7004
7005Value *llvm::simplifyCall(CallBase *Call,Value *Callee,ArrayRef<Value *> Args,
7006constSimplifyQuery &Q) {
7007// Args should not contain operand bundle operands.
7008assert(Call->arg_size() == Args.size());
7009
7010// musttail calls can only be simplified if they are also DCEd.
7011// As we can't guarantee this here, don't simplify them.
7012if (Call->isMustTailCall())
7013returnnullptr;
7014
7015// call undef -> poison
7016// call null -> poison
7017if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
7018returnPoisonValue::get(Call->getType());
7019
7020if (Value *V =tryConstantFoldCall(Call, Callee, Args, Q))
7021return V;
7022
7023auto *F = dyn_cast<Function>(Callee);
7024if (F &&F->isIntrinsic())
7025if (Value *Ret =simplifyIntrinsic(Call, Callee, Args, Q))
7026return Ret;
7027
7028returnnullptr;
7029}
7030
7031Value *llvm::simplifyConstrainedFPCall(CallBase *Call,constSimplifyQuery &Q) {
7032assert(isa<ConstrainedFPIntrinsic>(Call));
7033SmallVector<Value *, 4> Args(Call->args());
7034if (Value *V =tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
7035return V;
7036if (Value *Ret =simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7037return Ret;
7038returnnullptr;
7039}
7040
7041/// Given operands for a Freeze, see if we can fold the result.
7042staticValue *simplifyFreezeInst(Value *Op0,constSimplifyQuery &Q) {
7043// Use a utility function defined in ValueTracking.
7044if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
7045return Op0;
7046// We have room for improvement.
7047returnnullptr;
7048}
7049
7050Value *llvm::simplifyFreezeInst(Value *Op0,constSimplifyQuery &Q) {
7051 return ::simplifyFreezeInst(Op0, Q);
7052}
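// For example (illustrative IR), freezing a value that is already known not to
// be undef or poison is a no-op:
//   %f = freeze i32 %x   ; simplifies to %x when %x is, e.g., a function
//                        ; argument carrying the noundef attribute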
7053
7054Value *llvm::simplifyLoadInst(LoadInst *LI,Value *PtrOp,
7055constSimplifyQuery &Q) {
7056if (LI->isVolatile())
7057returnnullptr;
7058
7059if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7060returnConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7061
7062// We can only fold the load if it is from a constant global with a definitive
7063// initializer. Skip expensive logic if this is not the case.
7064auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
7065if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7066returnnullptr;
7067
7068// If GlobalVariable's initializer is uniform, then return the constant
7069// regardless of its offset.
7070if (Constant *C =ConstantFoldLoadFromUniformValue(GV->getInitializer(),
7071 LI->getType(), Q.DL))
7072returnC;
7073
7074// Try to convert operand into a constant by stripping offsets while looking
7075// through invariant.group intrinsics.
7076APIntOffset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
7077 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
7078 Q.DL, Offset, /* AllowNonInbounds */ true,
7079/* AllowInvariantGroup */true);
7080if (PtrOp == GV) {
7081// Index size may have changed due to address space casts.
7082Offset =Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
7083returnConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
7084 Q.DL);
7085 }
7086
7087returnnullptr;
7088}
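// For example (illustrative IR, placeholder names), a load from a constant
// global with a known initializer folds to that initializer:
//   @g = constant i32 42
//   %v = load i32, ptr @g   ; %v simplifies to i32 42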
7089
7090/// See if we can compute a simplified version of this instruction.
7091/// If not, this returns null.
7092
7093staticValue *simplifyInstructionWithOperands(Instruction *I,
7094ArrayRef<Value *> NewOps,
7095constSimplifyQuery &SQ,
7096unsigned MaxRecurse) {
7097assert(I->getFunction() &&"instruction should be inserted in a function");
7098assert((!SQ.CxtI || SQ.CxtI->getFunction() ==I->getFunction()) &&
7099"context instruction should be in the same function");
7100
7101constSimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
7102
7103switch (I->getOpcode()) {
7104default:
7105if (llvm::all_of(NewOps, [](Value *V) {return isa<Constant>(V); })) {
7106SmallVector<Constant *, 8> NewConstOps(NewOps.size());
7107transform(NewOps, NewConstOps.begin(),
7108 [](Value *V) { return cast<Constant>(V); });
7109returnConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7110 }
7111returnnullptr;
7112case Instruction::FNeg:
7113returnsimplifyFNegInst(NewOps[0],I->getFastMathFlags(), Q, MaxRecurse);
7114case Instruction::FAdd:
7115returnsimplifyFAddInst(NewOps[0], NewOps[1],I->getFastMathFlags(), Q,
7116 MaxRecurse);
7117case Instruction::Add:
7118returnsimplifyAddInst(
7119 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7120 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7121case Instruction::FSub:
7122returnsimplifyFSubInst(NewOps[0], NewOps[1],I->getFastMathFlags(), Q,
7123 MaxRecurse);
7124case Instruction::Sub:
7125returnsimplifySubInst(
7126 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7127 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7128case Instruction::FMul:
7129returnsimplifyFMulInst(NewOps[0], NewOps[1],I->getFastMathFlags(), Q,
7130 MaxRecurse);
7131case Instruction::Mul:
7132returnsimplifyMulInst(
7133 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7134 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7135case Instruction::SDiv:
7136returnsimplifySDivInst(NewOps[0], NewOps[1],
7137 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7138 MaxRecurse);
7139case Instruction::UDiv:
7140returnsimplifyUDivInst(NewOps[0], NewOps[1],
7141 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7142 MaxRecurse);
7143case Instruction::FDiv:
7144returnsimplifyFDivInst(NewOps[0], NewOps[1],I->getFastMathFlags(), Q,
7145 MaxRecurse);
7146case Instruction::SRem:
7147returnsimplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7148case Instruction::URem:
7149returnsimplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7150case Instruction::FRem:
7151returnsimplifyFRemInst(NewOps[0], NewOps[1],I->getFastMathFlags(), Q,
7152 MaxRecurse);
7153case Instruction::Shl:
7154returnsimplifyShlInst(
7155 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7156 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7157case Instruction::LShr:
7158returnsimplifyLShrInst(NewOps[0], NewOps[1],
7159 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7160 MaxRecurse);
7161case Instruction::AShr:
7162returnsimplifyAShrInst(NewOps[0], NewOps[1],
7163 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7164 MaxRecurse);
7165case Instruction::And:
7166returnsimplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7167case Instruction::Or:
7168returnsimplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7169case Instruction::Xor:
7170returnsimplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7171case Instruction::ICmp:
7172returnsimplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
7173 NewOps[1], Q, MaxRecurse);
7174case Instruction::FCmp:
7175returnsimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7176 NewOps[1],I->getFastMathFlags(), Q, MaxRecurse);
7177case Instruction::Select:
7178returnsimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7179case Instruction::GetElementPtr: {
7180auto *GEPI = cast<GetElementPtrInst>(I);
7181returnsimplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7182ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7183 MaxRecurse);
7184 }
7185case Instruction::InsertValue: {
7186InsertValueInst *IV = cast<InsertValueInst>(I);
7187returnsimplifyInsertValueInst(NewOps[0], NewOps[1],IV->getIndices(), Q,
7188 MaxRecurse);
7189 }
7190case Instruction::InsertElement:
7191returnsimplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7192case Instruction::ExtractValue: {
7193auto *EVI = cast<ExtractValueInst>(I);
7194returnsimplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7195 MaxRecurse);
7196 }
7197case Instruction::ExtractElement:
7198returnsimplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7199case Instruction::ShuffleVector: {
7200auto *SVI = cast<ShuffleVectorInst>(I);
7201returnsimplifyShuffleVectorInst(NewOps[0], NewOps[1],
7202 SVI->getShuffleMask(), SVI->getType(), Q,
7203 MaxRecurse);
7204 }
7205case Instruction::PHI:
7206returnsimplifyPHINode(cast<PHINode>(I), NewOps, Q);
7207case Instruction::Call:
7208returnsimplifyCall(
7209 cast<CallInst>(I), NewOps.back(),
7210 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7211case Instruction::Freeze:
7212returnllvm::simplifyFreezeInst(NewOps[0], Q);
7213#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7214#include "llvm/IR/Instruction.def"
7215#undef HANDLE_CAST_INST
7216returnsimplifyCastInst(I->getOpcode(), NewOps[0],I->getType(), Q,
7217 MaxRecurse);
7218case Instruction::Alloca:
7219// No simplifications for Alloca and it can't be constant folded.
7220returnnullptr;
7221case Instruction::Load:
7222returnsimplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7223 }
7224}
7225
7226Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7227ArrayRef<Value *> NewOps,
7228constSimplifyQuery &SQ) {
7229assert(NewOps.size() ==I->getNumOperands() &&
7230"Number of operands should match the instruction!");
7231 return ::simplifyInstructionWithOperands(I, NewOps, SQ,RecursionLimit);
7232}
7233
7234Value *llvm::simplifyInstruction(Instruction *I,constSimplifyQuery &SQ) {
7235SmallVector<Value *, 8> Ops(I->operands());
7236Value *Result =::simplifyInstructionWithOperands(I, Ops, SQ,RecursionLimit);
7237
7238 /// If called on unreachable code, the instruction may simplify to itself.
7239 /// Make life easier for users by detecting that case here, and returning a
7240 /// safe value instead.
7241return Result ==I ?PoisonValue::get(I->getType()) : Result;
7242}
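// A minimal usage sketch (hedged; how the SimplifyQuery is obtained and how the
// dead instructions are cleaned up depend on the calling pass):
//   const SimplifyQuery SQ = getBestSimplifyQuery(AM, F);
//   for (Instruction &I : instructions(F))
//     if (Value *V = simplifyInstruction(&I, SQ))
//       I.replaceAllUsesWith(V);   // the now-dead instruction can then be erased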
7243
7244/// Implementation of recursive simplification through an instruction's
7245/// uses.
7246///
7247/// This is the common implementation of the recursive simplification routines.
7248/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7249/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7250/// instructions to process and attempt to simplify it using
7251/// InstructionSimplify. Recursively visited users which could not be
7252/// simplified themselves are added to the optional UnsimplifiedUsers set for
7253/// further processing by the caller.
7254///
7255/// This routine returns 'true' only when *it* simplifies something. The passed
7256/// in simplified value does not count toward this.
7257staticboolreplaceAndRecursivelySimplifyImpl(
7258Instruction *I,Value *SimpleV,constTargetLibraryInfo *TLI,
7259constDominatorTree *DT,AssumptionCache *AC,
7260SmallSetVector<Instruction *, 8> *UnsimplifiedUsers =nullptr) {
7261bool Simplified =false;
7262SmallSetVector<Instruction *, 8> Worklist;
7263constDataLayout &DL =I->getDataLayout();
7264
7265// If we have an explicit value to collapse to, do that round of the
7266// simplification loop by hand initially.
7267if (SimpleV) {
7268for (User *U :I->users())
7269if (U !=I)
7270 Worklist.insert(cast<Instruction>(U));
7271
7272// Replace the instruction with its simplified value.
7273I->replaceAllUsesWith(SimpleV);
7274
7275if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7276I->eraseFromParent();
7277 }else {
7278 Worklist.insert(I);
7279 }
7280
7281// Note that we must test the size on each iteration, the worklist can grow.
7282for (unsignedIdx = 0;Idx != Worklist.size(); ++Idx) {
7283I = Worklist[Idx];
7284
7285// See if this instruction simplifies.
7286 SimpleV =simplifyInstruction(I, {DL, TLI, DT, AC});
7287if (!SimpleV) {
7288if (UnsimplifiedUsers)
7289 UnsimplifiedUsers->insert(I);
7290continue;
7291 }
7292
7293 Simplified =true;
7294
7295// Stash away all the uses of the old instruction so we can check them for
7296// recursive simplifications after a RAUW. This is cheaper than checking all
7297// uses of the replacement value on the recursive step in most cases.
7298for (User *U :I->users())
7299 Worklist.insert(cast<Instruction>(U));
7300
7301// Replace the instruction with its simplified value.
7302I->replaceAllUsesWith(SimpleV);
7303
7304if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7305I->eraseFromParent();
7306 }
7307return Simplified;
7308}
7309
7310boolllvm::replaceAndRecursivelySimplify(
7311Instruction *I,Value *SimpleV,constTargetLibraryInfo *TLI,
7312constDominatorTree *DT,AssumptionCache *AC,
7313SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7314assert(I != SimpleV &&"replaceAndRecursivelySimplify(X,X) is not valid!");
7315assert(SimpleV &&"Must provide a simplified value.");
7316returnreplaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7317 UnsimplifiedUsers);
7318}
7319
7320namespacellvm {
7321constSimplifyQuerygetBestSimplifyQuery(Pass &P,Function &F) {
7322auto *DTWP =P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7323auto *DT = DTWP ? &DTWP->getDomTree() :nullptr;
7324auto *TLIWP =P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7325auto *TLI = TLIWP ? &TLIWP->getTLI(F) :nullptr;
7326auto *ACWP =P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7327auto *AC = ACWP ? &ACWP->getAssumptionCache(F) :nullptr;
7328return {F.getDataLayout(), TLI, DT, AC};
7329}
7330
7331constSimplifyQuerygetBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7332constDataLayout &DL) {
7333return {DL, &AR.TLI, &AR.DT, &AR.AC};
7334}
7335
7336template <classT,class... TArgs>
7337constSimplifyQuerygetBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7338Function &F) {
7339auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7340auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7341auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7342return {F.getDataLayout(), TLI, DT, AC};
7343}
7344templateconstSimplifyQuerygetBestSimplifyQuery(AnalysisManager<Function> &,
7345Function &);
7346
7347boolSimplifyQuery::isUndefValue(Value *V) const{
7348if (!CanUseUndef)
7349returnfalse;
7350
7351returnmatch(V,m_Undef());
7352}
7353
7354}// namespace llvm
7355
7356void InstSimplifyFolder::anchor() {}
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition:ARMSLSHardening.cpp:73
AliasAnalysis.h
AssumptionCache.h
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
D
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
CaptureTracking.h
CmpInstAnalysis.h
ConstantFolding.h
ConstantRange.h
DataLayout.h
RetTy
return RetTy
Definition:DeadArgumentElimination.cpp:361
Idx
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Definition:DeadArgumentElimination.cpp:353
Dominators.h
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
MI
IRTranslator LLVM IR MI
Definition:IRTranslator.cpp:112
Operator.h
InstSimplifyFolder.h
InstrTypes.h
simplifyFreezeInst
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
Definition:InstructionSimplify.cpp:7042
simplifyCmpSelFalseCase
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
Definition:InstructionSimplify.cpp:134
simplifyCmpSelCase
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
Definition:InstructionSimplify.cpp:110
simplifySelectWithFakeICmpEq
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
Definition:InstructionSimplify.cpp:4617
simplifyLShrInst
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
Definition:InstructionSimplify.cpp:1436
simplifyUDivInst
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
Definition:InstructionSimplify.cpp:1222
simplifyShuffleVectorInst
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:5452
foldMinMaxSharedOp
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
Definition:InstructionSimplify.cpp:6398
simplifySubInst
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
Definition:InstructionSimplify.cpp:741
simplifyFCmpInst
static Value * simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:4062
expandCommutativeBinOp
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
Definition:InstructionSimplify.cpp:227
foldOrCommuteConstant
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:561
haveNonOverlappingStorage
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distict storage region [V, object_size(V)] which d...
Definition:InstructionSimplify.cpp:2612
foldConstant
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:5574
handleOtherCmpSelSimplifications
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
Definition:InstructionSimplify.cpp:143
threadCmpOverPHI
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
Definition:InstructionSimplify.cpp:520
propagateNaN
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
Definition:InstructionSimplify.cpp:5603
simplifyICmpOfBools
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
Definition:InstructionSimplify.cpp:2831
simplifyICmpWithBinOpOnLHS
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:3105
simplifyAShrInst
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
Definition:InstructionSimplify.cpp:1473
simplifyRelativeLoad
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
Definition:InstructionSimplify.cpp:6177
simplifyDiv
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
Definition:InstructionSimplify.cpp:1134
simplifyPHINode
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
Definition:InstructionSimplify.cpp:5305
simplifyExtractValueInst
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:5224
simplifySelectInst
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:4823
simplifyAddInst
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
Definition:InstructionSimplify.cpp:589
simplifyUnOp
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
Definition:InstructionSimplify.cpp:6013
isSameCompare
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
Definition:InstructionSimplify.cpp:93
simplifyAndCommutative
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:1971
simplifyInstructionWithOperands
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
Definition:InstructionSimplify.cpp:7093
isIdempotent
static bool isIdempotent(Intrinsic::ID ID)
Definition:InstructionSimplify.cpp:6139
getRange
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
Definition:InstructionSimplify.cpp:3725
simplifyAndOrOfICmpsWithCtpop
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
Definition:InstructionSimplify.cpp:1700
simplifyUnsignedRangeCheck
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
Definition:InstructionSimplify.cpp:1506
tryConstantFoldCall
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:6983
simplifyWithOpsReplaced
static Value * simplifyWithOpsReplaced(Value *V, ArrayRef< std::pair< Value *, Value * > > Ops, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:4294
simplifyICmpInst
static Value * simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:3741
simplifyExtractElementInst
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:5254
simplifyAndOfICmpsWithAdd
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
Definition:InstructionSimplify.cpp:1652
simplifyICmpWithMinMax
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
Definition:InstructionSimplify.cpp:3484
MonotonicType
MonotonicType
Definition:InstructionSimplify.cpp:3042
MonotonicType::LowerEq
@ LowerEq
MonotonicType::GreaterEq
@ GreaterEq
simplifyCmpSelTrueCase
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
Definition:InstructionSimplify.cpp:126
simplifyIntrinsic
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:6790
getUnsignedMonotonicValues
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
Definition:InstructionSimplify.cpp:3045
isPoisonShift
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
Definition:InstructionSimplify.cpp:1265
stripAndComputeConstantOffsets
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
Definition:InstructionSimplify.cpp:670
simplifyCmpInst
static Value * simplifyCmpInst(CmpPredicate, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:6127
simplifyFMAFMul
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Definition:InstructionSimplify.cpp:5819
simplifyRightShift
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
Definition:InstructionSimplify.cpp:1366
simplifyICmpWithIntrinsicOnLHS
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
Definition:InstructionSimplify.cpp:3692
simplifySDivInst
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
Definition:InstructionSimplify.cpp:1206
simplifyByDomEq
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
Definition:InstructionSimplify.cpp:707
simplifyFPUnOp
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
Definition:InstructionSimplify.cpp:6026
simplifyICmpWithBinOp
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
Definition:InstructionSimplify.cpp:3250
simplifyFAddInst
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
Definition:InstructionSimplify.cpp:5687
simplifyOrOfICmps
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:1790
expandBinOp
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
Definition:InstructionSimplify.cpp:193
simplifyICmpWithZero
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
Definition:InstructionSimplify.cpp:2934
simplifyICmpWithConstant
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
Definition:InstructionSimplify.cpp:2994
simplifySelectWithFCmp
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
Definition:InstructionSimplify.cpp:4773
getFalse
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
Definition:InstructionSimplify.cpp:86
simplifyDivRem
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
Definition:InstructionSimplify.cpp:1040
removesFPFraction
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
Definition:InstructionSimplify.cpp:6161
simplifyFDivInst
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Definition:InstructionSimplify.cpp:5913
simplifyOrOfICmpsWithAdd
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
Definition:InstructionSimplify.cpp:1743
simplifySelectWithEquivalence
static Value * simplifySelectWithEquivalence(ArrayRef< std::pair< Value *, Value * > > Replacements, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
Definition:InstructionSimplify.cpp:4629
simplifyMulInst
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
Definition:InstructionSimplify.cpp:892
simplifyFNegInst
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
Definition:InstructionSimplify.cpp:5583
simplifyOrInst
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
Definition:InstructionSimplify.cpp:2299
trySimplifyICmpWithAdds
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
Definition:InstructionSimplify.cpp:3225
simplifySelectBitTest
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
Definition:InstructionSimplify.cpp:4502
simplifyAssociativeBinOp
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
Definition:InstructionSimplify.cpp:245
simplifyShlInst
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Shl, see if we can fold the result.
Definition:InstructionSimplify.cpp:1395
threadBinOpOverPHI
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
Definition:InstructionSimplify.cpp:473
simplifyCmpSelOfMaxMin
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
Definition:InstructionSimplify.cpp:4543
simplifyFRemInst
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Definition:InstructionSimplify.cpp:5972
simplifyFSubInst
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
Definition:InstructionSimplify.cpp:5753
simplifyXorInst
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
Definition:InstructionSimplify.cpp:2483
simplifyURemInst
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
Definition:InstructionSimplify.cpp:1255
simplifyFPOp
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
Definition:InstructionSimplify.cpp:5645
RecursionLimit
@ RecursionLimit
Definition:InstructionSimplify.cpp:52
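Every recursive helper in this file carries a MaxRecurse budget that starts at RecursionLimit. A minimal sketch of that guard (the function name is made up):

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Illustrative sketch of the recursion guard used by the helpers in this
// file: each one spends a unit of the MaxRecurse budget before recursing,
// and gives up (returns null) once the budget is exhausted.
static Value *recursiveFoldSketch(Value *V, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  if (!MaxRecurse--)
    return nullptr;
  (void)V;
  (void)Q;
  // ... attempt folds here, passing the decremented MaxRecurse along ...
  return nullptr;
}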
threadCmpOverSelect
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
Definition:InstructionSimplify.cpp:428
replaceAndRecursivelySimplifyImpl
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
Definition:InstructionSimplify.cpp:7257
isAllocDisjoint
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
Definition:InstructionSimplify.cpp:2592
getTrue
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
Definition:InstructionSimplify.cpp:90
simplifyGEPInst
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, GEPNoWrapFlags, const SimplifyQuery &, unsigned)
Given operands for a GetElementPtrInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:5004
isDivZero
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
Definition:InstructionSimplify.cpp:974
simplifyLdexp
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
Definition:InstructionSimplify.cpp:6230
simplifyLogicOfAddSub
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
Definition:InstructionSimplify.cpp:1948
simplifyOrLogic
static Value * simplifyOrLogic(Value *X, Value *Y)
Definition:InstructionSimplify.cpp:2208
getCompareTy
static Type * getCompareTy(Value *Op)
Definition:InstructionSimplify.cpp:2566
simplifyCastInst
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
Definition:InstructionSimplify.cpp:5357
simplifyAndOfICmps
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:1720
simplifyBinOp
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
Definition:InstructionSimplify.cpp:6048
isICmpTrue
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
Definition:InstructionSimplify.cpp:965
simplifyInsertValueInst
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
Definition:InstructionSimplify.cpp:5145
simplifyAndInst
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
Definition:InstructionSimplify.cpp:2024
foldIdentityShuffles
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
Definition:InstructionSimplify.cpp:5407
simplifyAndOrOfFCmps
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
Definition:InstructionSimplify.cpp:1813
extractEquivalentCondition
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
Definition:InstructionSimplify.cpp:2573
simplifyAndOrOfCmps
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
Definition:InstructionSimplify.cpp:1854
simplifyWithOpReplaced
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:4478
threadBinOpOverSelect
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
Definition:InstructionSimplify.cpp:349
simplifyICmpUsingMonotonicValues
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS)
Definition:InstructionSimplify.cpp:3087
computePointerDifference
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
Definition:InstructionSimplify.cpp:683
simplifySRemInst
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
Definition:InstructionSimplify.cpp:1234
simplifyFMulInst
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
Definition:InstructionSimplify.cpp:5868
simplifyAndOrOfICmpsWithConstants
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
Definition:InstructionSimplify.cpp:1615
simplifyAndOrWithICmpEq
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Definition:InstructionSimplify.cpp:1897
simplifyICmpWithDominatingAssume
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:3669
simplifyShift
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Shl, LShr, or AShr, see if we can fold the result.
Definition:InstructionSimplify.cpp:1296
computePointerICmp
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Definition:InstructionSimplify.cpp:2682
simplifyRem
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
Definition:InstructionSimplify.cpp:1169
valueDominatesPHI
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
Definition:InstructionSimplify.cpp:171
simplifySelectWithICmpCond
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
Definition:InstructionSimplify.cpp:4654
foldMinimumMaximumSharedOp
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
Definition:InstructionSimplify.cpp:6423
simplifyUnaryIntrinsic
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
Definition:InstructionSimplify.cpp:6279
InstructionSimplify.h
Instructions.h
KnownBits.h
LoopAnalysisManager.h
This header provides classes for managing per-loop analyses.
MemoryBuiltins.h
OverflowInstAnalysis.h
PatternMatch.h
STLExtras.h
This file contains some templates that are useful if you are working with the STL at all.
SetVector.h
This file implements a set that has insertion order iteration characteristics.
Statepoint.h
Statistic.h
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
STATISTIC
#define STATISTIC(VARNAME, DESC)
Definition:Statistic.h:166
TargetLibraryInfo.h
ValueTracking.h
VectorUtils.h
llvm::APFloat
Definition:APFloat.h:904
llvm::APInt
Class for arbitrary precision integers.
Definition:APInt.h:78
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition:APInt.cpp:1007
llvm::APInt::getActiveBits
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition:APInt.h:1492
llvm::APInt::isZero
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition:APInt.h:380
llvm::APInt::urem
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition:APInt.cpp:1640
llvm::APInt::setSignBit
void setSignBit()
Set the sign bit to 1.
Definition:APInt.h:1340
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition:APInt.h:1468
llvm::APInt::ult
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition:APInt.h:1111
llvm::APInt::intersects
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition:APInt.h:1249
llvm::APInt::countr_zero
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition:APInt.h:1618
llvm::APInt::isNonPositive
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition:APInt.h:361
llvm::APInt::sextOrTrunc
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition:APInt.cpp:1015
llvm::APInt::isStrictlyPositive
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition:APInt.h:356
llvm::APInt::getLimitedValue
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition:APInt.h:475
llvm::APInt::getBoolValue
bool getBoolValue() const
Convert APInt to a boolean value.
Definition:APInt.h:471
llvm::APInt::srem
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition:APInt.cpp:1710
llvm::APInt::isMask
bool isMask(unsigned numBits) const
Definition:APInt.h:488
llvm::APInt::isMaxSignedValue
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition:APInt.h:405
llvm::APInt::isNonNegative
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition:APInt.h:334
llvm::APInt::ule
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition:APInt.h:1150
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition:APInt.h:1257
llvm::APInt::isPowerOf2
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition:APInt.h:440
llvm::APInt::getLowBitsSet
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition:APInt.h:306
llvm::APInt::isSignBitSet
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition:APInt.h:341
llvm::APInt::slt
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition:APInt.h:1130
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition:APInt.h:296
llvm::APInt::getZero
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition:APInt.h:200
llvm::APInt::isOne
bool isOne() const
Determine if this is a value of 1.
Definition:APInt.h:389
llvm::APInt::getOneBitSet
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition:APInt.h:239
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition:APInt.h:1221
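A minimal, self-contained sketch exercising a few of the APInt queries listed above; the values and the function name are illustrative only.

#include "llvm/ADT/APInt.h"
using namespace llvm;

// Illustrative only: a low-bit mask of width 4 (0xF), its mask test, and
// the power-of-two structure of Mask + 1 (0x10).
static bool apintMaskDemo() {
  APInt Mask = APInt::getLowBitsSet(/*numBits=*/32, /*loBitsSet=*/4);
  bool IsMask = Mask.isMask(4);                      // exactly the low 4 bits set
  bool IsPow2 = (Mask + 1).isPowerOf2();             // 0x10 is a power of two
  unsigned TrailingZeros = (Mask + 1).countr_zero(); // 4
  return IsMask && IsPow2 && TrailingZeros == 4;
}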
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition:Instructions.h:63
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition:PassManager.h:253
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition:Argument.h:31
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition:ArrayRef.h:41
llvm::ArrayRef::back
const T & back() const
back - Get the last element.
Definition:ArrayRef.h:177
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition:ArrayRef.h:168
llvm::ArrayRef::drop_back
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition:ArrayRef.h:213
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition:ArrayRef.h:163
llvm::ArrayRef::slice
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition:ArrayRef.h:198
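A small illustration of the ArrayRef operations above, of the kind used when peeling GEP index lists; the data and the function name are placeholders.

#include "llvm/ADT/ArrayRef.h"
using namespace llvm;

// Illustrative only: the ArrayRef queries used when trimming index lists.
static int arrayRefDemo() {
  int Storage[] = {1, 2, 3, 4};
  ArrayRef<int> Idxs(Storage);
  if (Idxs.empty())
    return 0;
  int Last = Idxs.back();                      // 4
  ArrayRef<int> AllButLast = Idxs.drop_back(); // {1, 2, 3}
  ArrayRef<int> Middle = Idxs.slice(1, 2);     // {2, 3}
  return Last + static_cast<int>(AllButLast.size() + Middle.size()); // 9
}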
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition:AssumptionCache.h:204
llvm::AssumptionCacheTracker::getAssumptionCache
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
Definition:AssumptionCache.cpp:246
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition:AssumptionCache.h:42
llvm::AssumptionCache::assumptionsFor
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Definition:AssumptionCache.h:157
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition:BasicBlock.h:240
llvm::BinaryOperator
Definition:InstrTypes.h:170
llvm::BinaryOperator::getOpcode
BinaryOps getOpcode() const
Definition:InstrTypes.h:370
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition:InstrTypes.h:1112
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition:Instructions.h:1479
llvm::CastInst::isEliminableCastPair
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
Definition:Instructions.cpp:2759
llvm::CmpInst
This class is the base class for the comparison instructions.
Definition:InstrTypes.h:661
llvm::CmpInst::makeCmpResultType
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition:InstrTypes.h:980
llvm::CmpInst::getStrictPredicate
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition:InstrTypes.h:856
llvm::CmpInst::isFalseWhenEqual
bool isFalseWhenEqual() const
This is just a convenience.
Definition:InstrTypes.h:946
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition:InstrTypes.h:673
llvm::CmpInst::BAD_ICMP_PREDICATE
@ BAD_ICMP_PREDICATE
Definition:InstrTypes.h:706
llvm::CmpInst::ICMP_SLT
@ ICMP_SLT
signed less than
Definition:InstrTypes.h:702
llvm::CmpInst::ICMP_SLE
@ ICMP_SLE
signed less or equal
Definition:InstrTypes.h:703
llvm::CmpInst::ICMP_UGE
@ ICMP_UGE
unsigned greater or equal
Definition:InstrTypes.h:697
llvm::CmpInst::ICMP_UGT
@ ICMP_UGT
unsigned greater than
Definition:InstrTypes.h:696
llvm::CmpInst::ICMP_SGT
@ ICMP_SGT
signed greater than
Definition:InstrTypes.h:700
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition:InstrTypes.h:698
llvm::CmpInst::ICMP_EQ
@ ICMP_EQ
equal
Definition:InstrTypes.h:694
llvm::CmpInst::ICMP_NE
@ ICMP_NE
not equal
Definition:InstrTypes.h:695
llvm::CmpInst::ICMP_SGE
@ ICMP_SGE
signed greater or equal
Definition:InstrTypes.h:701
llvm::CmpInst::ICMP_ULE
@ ICMP_ULE
unsigned less or equal
Definition:InstrTypes.h:699
llvm::CmpInst::FCMP_UNO
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition:InstrTypes.h:683
llvm::CmpInst::isSigned
bool isSigned() const
Definition:InstrTypes.h:928
llvm::CmpInst::getSwappedPredicate
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition:InstrTypes.h:825
llvm::CmpInst::isTrueWhenEqual
bool isTrueWhenEqual() const
This is just a convenience.
Definition:InstrTypes.h:940
llvm::CmpInst::isFPPredicate
bool isFPPredicate() const
Definition:InstrTypes.h:780
llvm::CmpInst::getInversePredicate
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition:InstrTypes.h:787
llvm::CmpInst::getPredicate
Predicate getPredicate() const
Return the predicate for this instruction.
Definition:InstrTypes.h:763
llvm::CmpInst::isUnordered
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Definition:Instructions.cpp:3864
llvm::CmpInst::isIntPredicate
bool isIntPredicate() const
Definition:InstrTypes.h:781
llvm::CmpInst::isUnsigned
bool isUnsigned() const
Definition:InstrTypes.h:934
llvm::CmpPredicate
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition:CmpPredicate.h:22
llvm::ConstantExpr::getIntToPtr
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition:Constants.cpp:2307
llvm::ConstantExpr::getExtractElement
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
Definition:Constants.cpp:2555
llvm::ConstantExpr::getBinOpAbsorber
static Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
Definition:Constants.cpp:2763
llvm::ConstantExpr::getNot
static Constant * getNot(Constant *C)
Definition:Constants.cpp:2632
llvm::ConstantExpr::getInsertElement
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
Definition:Constants.cpp:2577
llvm::ConstantExpr::getShuffleVector
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
Definition:Constants.cpp:2600
llvm::ConstantExpr::isSupportedGetElementPtr
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
Definition:Constants.h:1379
llvm::ConstantExpr::getGetElementPtr
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition:Constants.h:1267
llvm::ConstantExpr::getBinOpIdentity
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition:Constants.cpp:2692
llvm::ConstantFP::getZero
static Constant * getZero(Type *Ty, bool Negative=false)
Definition:Constants.cpp:1057
llvm::ConstantFP::getNegativeZero
static Constant * getNegativeZero(Type *Ty)
Definition:Constants.h:309
llvm::ConstantFP::getNaN
static Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
Definition:Constants.cpp:1024
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition:Constants.h:83
llvm::ConstantInt::getTrue
static ConstantInt * getTrue(LLVMContext &Context)
Definition:Constants.cpp:866
llvm::ConstantInt::getSigned
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition:Constants.h:126
llvm::ConstantInt::getFalse
static ConstantInt * getFalse(LLVMContext &Context)
Definition:Constants.cpp:873
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition:Constants.h:157
llvm::ConstantInt::getBool
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition:Constants.cpp:880
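Illustrative only: producing the i1 constant that an icmp fold hands back once the result is known. The helper name below is made up.

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustrative only: the i1 constant returned once a comparison is known.
static ConstantInt *knownCompareResult(LLVMContext &Ctx, bool Result) {
  return ConstantInt::getBool(Ctx, Result);
}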
llvm::ConstantPointerNull::get
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition:Constants.cpp:1826
llvm::ConstantRange
This class represents a range of values.
Definition:ConstantRange.h:47
llvm::ConstantRange::getSingleElement
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
Definition:ConstantRange.h:251
llvm::ConstantRange::isFullSet
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
Definition:ConstantRange.cpp:414
llvm::ConstantRange::isEmptySet
bool isEmptySet() const
Return true if this set contains no members.
Definition:ConstantRange.cpp:418
llvm::ConstantRange::makeExactICmpRegion
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
Definition:ConstantRange.cpp:158
llvm::ConstantRange::inverse
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
Definition:ConstantRange.cpp:1935
llvm::ConstantRange::contains
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
Definition:ConstantRange.cpp:507
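A short, illustrative use of the ConstantRange entries above, similar to what the icmp-with-constant folds rely on; names and values are placeholders.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Illustrative only: the exact range of i32 values x satisfying "x ult 8"
// is [0, 8); its inverse is everything from 8 upward.
static bool constantRangeDemo() {
  ConstantRange R =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 8));
  bool InRange = R.contains(APInt(32, 5));             // true
  bool InInverse = R.inverse().contains(APInt(32, 5)); // false
  return InRange && !InInverse;
}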
llvm::ConstantStruct::get
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition:Constants.cpp:1378
llvm::ConstantVector::getSplat
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
Definition:Constants.cpp:1472
llvm::ConstantVector::get
static Constant * get(ArrayRef< Constant * > V)
Definition:Constants.cpp:1421
llvm::Constant
This is an important base class in LLVM.
Definition:Constant.h:42
llvm::Constant::getAllOnesValue
static Constant * getAllOnesValue(Type *Ty)
Definition:Constants.cpp:420
llvm::Constant::isAllOnesValue
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
Definition:Constants.cpp:107
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition:Constants.cpp:373
llvm::Constant::isNaN
bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
Definition:Constants.cpp:277
llvm::Constant::getAggregateElement
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition:Constants.cpp:435
llvm::Constant::isNullValue
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition:Constants.cpp:90
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition:DataLayout.h:63
llvm::DataLayout::getPointerSizeInBits
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients ...
Definition:DataLayout.h:364
llvm::DataLayout::getIntPtrType
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition:DataLayout.cpp:851
llvm::DataLayout::getIndexTypeSizeInBits
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition:DataLayout.cpp:754
llvm::DataLayout::getIndexType
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition:DataLayout.cpp:878
llvm::DataLayout::getTypeAllocSize
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition:DataLayout.h:457
llvm::DataLayout::getIndexSizeInBits
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition:DataLayout.h:369
llvm::DataLayout::getTypeSizeInBits
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition:DataLayout.h:617
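A sketch of the DataLayout queries above, written against an existing layout; the helper name and parameters are illustrative, not part of this file.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustrative only: the pointer-width queries the GEP and pointer-compare
// folds make against the module's DataLayout.
static unsigned pointerWidthDemo(const DataLayout &DL, LLVMContext &Ctx) {
  unsigned PtrBits = DL.getPointerSizeInBits(/*AS=*/0);
  IntegerType *IntPtrTy = DL.getIntPtrType(Ctx, /*AddressSpace=*/0);
  (void)IntPtrTy;
  return PtrBits;
}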
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition:Dominators.h:317
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition:Dominators.h:162
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition:Dominators.cpp:122
llvm::ElementCount
Definition:TypeSize.h:300
llvm::ExtractValueInst
This instruction extracts a struct member or array element value from an aggregate value.
Definition:Instructions.h:2397
llvm::FCmpInst
This instruction compares its operands according to the predicate given to the constructor.
Definition:Instructions.h:1379
llvm::FastMathFlags
Convenience struct for specifying and reasoning about fast-math flags.
Definition:FMF.h:20
llvm::FastMathFlags::noSignedZeros
bool noSignedZeros() const
Definition:FMF.h:68
llvm::FastMathFlags::noInfs
bool noInfs() const
Definition:FMF.h:67
llvm::FastMathFlags::allowReassoc
bool allowReassoc() const
Flag queries.
Definition:FMF.h:65
llvm::FastMathFlags::noNaNs
bool noNaNs() const
Definition:FMF.h:66
llvm::Function
Definition:Function.h:63
llvm::GCRelocateInst
Represents calls to the gc.relocate intrinsic.
Definition:IntrinsicInst.h:1802
llvm::GCRelocateInst::getBasePtr
Value * getBasePtr() const
Definition:IntrinsicInst.cpp:867
llvm::GCRelocateInst::getDerivedPtr
Value * getDerivedPtr() const
Definition:IntrinsicInst.cpp:878
llvm::GEPNoWrapFlags
Represents flags for the getelementptr instruction/expression.
Definition:GEPNoWrapFlags.h:26
llvm::GetElementPtrInst::getIndexedType
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Definition:Instructions.cpp:1514
llvm::GlobalValue
Definition:GlobalValue.h:48
llvm::ICmpInst
This instruction compares its operands according to the predicate given to the constructor.
Definition:Instructions.h:1158
llvm::ICmpInst::compare
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Definition:Instructions.cpp:3745
llvm::ICmpInst::getSignedPredicate
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
Definition:Instructions.h:1238
llvm::ICmpInst::isEquality
bool isEquality() const
Return true if this predicate is either EQ or NE.
Definition:Instructions.h:1291
llvm::ICmpInst::isRelational
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition:Instructions.h:1305
llvm::ICmpInst::getUnsignedPredicate
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Definition:Instructions.h:1249
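Illustrative only: folding a comparison whose operands are both known constants with ICmpInst::compare. The helper name is made up.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative only: evaluate "icmp ult 3, 7" on constant operands.
static bool icmpCompareDemo() {
  APInt A(32, 3), B(32, 7);
  return ICmpInst::compare(A, B, ICmpInst::ICMP_ULT); // true
}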
llvm::InsertValueInst
This instruction inserts a struct field or array element value into an aggregate value.
Definition:Instructions.h:2485
llvm::Instruction
Definition:Instruction.h:68
llvm::Instruction::hasNoSignedZeros
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
Definition:Instruction.cpp:631
llvm::Instruction::isAssociative
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
Definition:Instruction.cpp:1251
llvm::Instruction::isCommutative
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
Definition:Instruction.cpp:1268
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition:Instruction.cpp:72
llvm::Instruction::BinaryOps
BinaryOps
Definition:Instruction.h:989
llvm::Instruction::UnaryOps
UnaryOps
Definition:Instruction.h:982
llvm::Instruction::CastOps
CastOps
Definition:Instruction.h:1003
llvm::LoadInst
An instruction for reading from memory.
Definition:Instructions.h:176
llvm::LoadInst::isVolatile
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition:Instructions.h:205
llvm::MDNode
Metadata node.
Definition:Metadata.h:1073
llvm::MinMaxIntrinsic::getSaturationPoint
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain thre...
Definition:IntrinsicInst.h:813
llvm::MinMaxIntrinsic::getPredicate
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
Definition:IntrinsicInst.h:798
llvm::PHINode
Definition:Instructions.h:2600
llvm::PHINode::incoming_values
op_range incoming_values()
Definition:Instructions.h:2665
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition:Instructions.h:2695
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
Definition:Instructions.h:2675
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition:Instructions.h:2671
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition:Pass.h:94
llvm::PoisonValue::get
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition:Constants.cpp:1878
llvm::PtrToIntInst
This class represents a cast from a pointer to an integer.
Definition:Instructions.h:4851
llvm::SExtInst
This class represents a sign extension of integer types.
Definition:Instructions.h:4600
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition:Instructions.h:1657
llvm::SetVector::size
size_type size() const
Determine the number of elements in the SetVector.
Definition:SetVector.h:98
llvm::SetVector::insert
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition:SetVector.h:162
llvm::ShuffleVectorInst::commuteShuffleMask
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
Definition:Instructions.h:2308
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition:SmallPtrSet.h:363
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition:SmallPtrSet.h:384
llvm::SmallPtrSetImpl::contains
bool contains(ConstPtrType Ptr) const
Definition:SmallPtrSet.h:458
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition:SmallPtrSet.h:519
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition:SetVector.h:370
llvm::SmallVectorBase::size
size_t size() const
Definition:SmallVector.h:78
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition:SmallVector.h:573
llvm::SmallVectorImpl::assign
void assign(size_type NumElts, ValueParamT Elt)
Definition:SmallVector.h:704
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition:SmallVector.h:663
llvm::SmallVectorTemplateBase::push_back
void push_back(const T &Elt)
Definition:SmallVector.h:413
llvm::SmallVectorTemplateCommon::begin
iterator begin()
Definition:SmallVector.h:267
llvm::SmallVectorTemplateCommon::back
reference back()
Definition:SmallVector.h:308
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition:SmallVector.h:1196
llvm::TargetLibraryInfoWrapperPass
Definition:TargetLibraryInfo.h:639
llvm::TargetLibraryInfoWrapperPass::getTLI
TargetLibraryInfo & getTLI(const Function &F)
Definition:TargetLibraryInfo.h:655
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition:TargetLibraryInfo.h:280
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition:Type.h:45
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition:Type.h:270
llvm::Type::isIntOrIntVectorTy
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition:Type.h:243
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition:Type.h:264
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition:Type.h:310
llvm::Type::isScalableTy
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition:Type.h:128
llvm::Type::isPtrOrPtrVectorTy
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition:Type.h:267
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition:Constants.cpp:1859
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition:Use.h:43
llvm::User
Definition:User.h:44
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition:User.h:228
llvm::Value
LLVM Value Representation.
Definition:Value.h:74
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition:Value.h:255
llvm::Value::stripAndAccumulateConstantOffsets
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition:Value.cpp:1075
llvm::ZExtInst
This class represents zero extension of integer types.
Definition:Instructions.h:4569
llvm::details::FixedOrScalableQuantity::isScalable
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition:TypeSize.h:171
llvm::details::FixedOrScalableQuantity::getKnownMinValue
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition:TypeSize.h:168
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition:ErrorHandling.h:143
llvm::PatternMatch
Definition:PatternMatch.h:47
llvm::PatternMatch::m_AllOnes
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition:PatternMatch.h:524
llvm::PatternMatch::m_Poison
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition:PatternMatch.h:160
llvm::PatternMatch::m_LowBitMask
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
Definition:PatternMatch.h:673
llvm::PatternMatch::m_And
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1216
llvm::PatternMatch::m_PtrAdd
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
Definition:PatternMatch.h:1944
llvm::PatternMatch::m_Negative
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
Definition:PatternMatch.h:550
llvm::PatternMatch::m_Add
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1102
llvm::PatternMatch::m_BinOp
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition:PatternMatch.h:100
llvm::PatternMatch::m_FCmp
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Definition:PatternMatch.h:1633
llvm::PatternMatch::m_c_FMul
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
Definition:PatternMatch.h:2887
llvm::PatternMatch::m_SignMask
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
Definition:PatternMatch.h:664
llvm::PatternMatch::m_AShr
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1246
llvm::PatternMatch::m_Inf
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
Definition:PatternMatch.h:726
llvm::PatternMatch::m_BitReverse
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
Definition:PatternMatch.h:2692
llvm::PatternMatch::m_FSub
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1120
llvm::PatternMatch::m_Power2
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition:PatternMatch.h:619
llvm::PatternMatch::m_FNegNSZ
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
Definition:PatternMatch.h:1163
llvm::PatternMatch::m_URem
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1198
llvm::PatternMatch::m_Constant
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition:PatternMatch.h:165
llvm::PatternMatch::m_c_And
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
Definition:PatternMatch.h:2798
llvm::PatternMatch::m_Trunc
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition:PatternMatch.h:2075
llvm::PatternMatch::m_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1228
llvm::PatternMatch::m_SpecificInt
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition:PatternMatch.h:982
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition:PatternMatch.h:49
llvm::PatternMatch::m_IDiv
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
Definition:PatternMatch.h:1553
llvm::PatternMatch::m_AnyZeroFP
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
Definition:PatternMatch.h:764
llvm::PatternMatch::m_Specific
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition:PatternMatch.h:885
llvm::PatternMatch::m_Shr
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
Definition:PatternMatch.h:1525
llvm::PatternMatch::m_c_ICmp
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
Definition:PatternMatch.h:2765
llvm::PatternMatch::m_ExtractElt
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
Definition:PatternMatch.h:1837
llvm::PatternMatch::m_ConstantInt
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition:PatternMatch.h:168
llvm::PatternMatch::m_One
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition:PatternMatch.h:592
llvm::PatternMatch::m_Select
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Definition:PatternMatch.h:1799
llvm::PatternMatch::m_NegZeroFP
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
Definition:PatternMatch.h:782
llvm::PatternMatch::m_SpecificFP
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
Definition:PatternMatch.h:928
llvm::PatternMatch::m_CombineAnd
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition:PatternMatch.h:245
llvm::PatternMatch::m_SMin
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2348
llvm::PatternMatch::m_Sqrt
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
Definition:PatternMatch.h:2736
llvm::PatternMatch::m_c_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
Definition:PatternMatch.h:2812
llvm::PatternMatch::m_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1168
llvm::PatternMatch::m_Deferred
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition:PatternMatch.h:903
llvm::PatternMatch::m_ZeroInt
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition:PatternMatch.h:599
llvm::PatternMatch::m_APIntAllowPoison
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition:PatternMatch.h:305
llvm::PatternMatch::m_Neg
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Definition:PatternMatch.h:2820
llvm::PatternMatch::m_ImmConstant
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition:PatternMatch.h:864
llvm::PatternMatch::m_NSWShl
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1305
llvm::PatternMatch::m_ZExt
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition:PatternMatch.h:2107
llvm::PatternMatch::m_NUWShl
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1348
llvm::PatternMatch::m_NUWMul
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1340
llvm::PatternMatch::m_UDiv
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1180
llvm::PatternMatch::m_UMax
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2354
llvm::PatternMatch::m_CheckedInt
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
Definition:PatternMatch.h:481
llvm::PatternMatch::m_FPOne
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
Definition:PatternMatch.h:931
llvm::PatternMatch::m_c_Add
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
Definition:PatternMatch.h:2784
llvm::PatternMatch::m_APFloatAllowPoison
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
Definition:PatternMatch.h:322
llvm::PatternMatch::m_UIToFP
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
Definition:PatternMatch.h:2151
llvm::PatternMatch::m_FShl
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Definition:PatternMatch.h:2725
llvm::PatternMatch::m_c_MaxOrMin
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2864
llvm::PatternMatch::m_SDiv
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1186
llvm::PatternMatch::m_NUWSub
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1332
llvm::PatternMatch::m_SMax
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2342
llvm::PatternMatch::m_APInt
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition:PatternMatch.h:299
llvm::PatternMatch::m_Value
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition:PatternMatch.h:92
llvm::PatternMatch::m_NSWAdd
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1281
llvm::PatternMatch::m_SIToFP
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
Definition:PatternMatch.h:2156
llvm::PatternMatch::m_LShr
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1240
llvm::PatternMatch::m_ICmp
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Definition:PatternMatch.h:1627
llvm::PatternMatch::m_Exact
Exact_match< T > m_Exact(const T &SubPattern)
Definition:PatternMatch.h:1580
llvm::PatternMatch::m_FNeg
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
Definition:PatternMatch.h:1156
llvm::PatternMatch::m_PosZeroFP
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
Definition:PatternMatch.h:773
llvm::PatternMatch::m_c_FAdd
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
Definition:PatternMatch.h:2880
llvm::PatternMatch::m_c_LogicalAnd
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
Definition:PatternMatch.h:3086
llvm::PatternMatch::m_Shl
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1234
llvm::PatternMatch::m_VecReverse
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
Definition:PatternMatch.h:2747
llvm::PatternMatch::m_APFloat
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition:PatternMatch.h:316
llvm::PatternMatch::m_MaxOrMin
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2371
llvm::PatternMatch::m_FShr
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Definition:PatternMatch.h:2731
llvm::PatternMatch::m_SRem
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1204
llvm::PatternMatch::m_Undef
auto m_Undef()
Match an arbitrary undef constant.
Definition:PatternMatch.h:152
llvm::PatternMatch::m_NaN
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
Definition:PatternMatch.h:710
llvm::PatternMatch::m_Not
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Definition:PatternMatch.h:2467
llvm::PatternMatch::m_Or
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1222
llvm::PatternMatch::m_BSwap
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
Definition:PatternMatch.h:2697
llvm::PatternMatch::m_SExt
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
Definition:PatternMatch.h:2101
llvm::PatternMatch::m_Zero
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition:PatternMatch.h:612
llvm::PatternMatch::m_c_Or
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
Definition:PatternMatch.h:2805
llvm::PatternMatch::m_c_LogicalOr
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
Definition:PatternMatch.h:3104
llvm::PatternMatch::m_InsertElt
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
Definition:PatternMatch.h:1829
llvm::PatternMatch::m_ElementWiseBitCast
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
Definition:PatternMatch.h:2049
llvm::PatternMatch::m_FAbs
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
Definition:PatternMatch.h:2702
llvm::PatternMatch::m_c_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
Definition:PatternMatch.h:2791
llvm::PatternMatch::m_PtrToInt
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
Definition:PatternMatch.h:2056
llvm::PatternMatch::m_NSWMul
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1297
llvm::PatternMatch::m_Sub
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition:PatternMatch.h:1114
llvm::PatternMatch::m_UMin
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
Definition:PatternMatch.h:2360
llvm::PatternMatch::m_CombineOr
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition:PatternMatch.h:239
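Nearly every fold in this file is phrased with the PatternMatch combinators above. A minimal, illustrative use (the helper name and bound names are placeholders):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Illustrative only: recognize "add X, C" in either operand order, where C
// is a constant integer (or splat), binding X and the APInt behind C.
static bool matchAddOfConstant(Value *V) {
  Value *X;
  const APInt *C;
  return match(V, m_c_Add(m_Value(X), m_APInt(C)));
}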
llvm::fp::ExceptionBehavior
ExceptionBehavior
Exception behavior used for floating point operations.
Definition:FPEnv.h:38
llvm::fp::ebStrict
@ ebStrict
This corresponds to "fpexcept.strict".
Definition:FPEnv.h:41
llvm::fp::ebIgnore
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition:FPEnv.h:39
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition:AddressRanges.h:18
llvm::getInverseMinMaxIntrinsic
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Definition:ValueTracking.cpp:9140
llvm::simplifyAShrInst
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
Definition:InstructionSimplify.cpp:1499
llvm::Log2_32_Ceil
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition:MathExtras.h:354
llvm::Offset
@ Offset
Definition:DWP.cpp:480
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1739
llvm::simplifyFMulInst
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
Definition:InstructionSimplify.cpp:5896
llvm::simplifyGEPInst
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5138
llvm::isValidAssumeForContext
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
Definition:ValueTracking.cpp:509
llvm::canCreatePoison
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
Definition:ValueTracking.cpp:7636
llvm::ConstantFoldSelectInstruction
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Definition:ConstantFold.cpp:261
llvm::simplifyFreezeInst
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
Definition:InstructionSimplify.cpp:7050
llvm::ConstantFoldFPInstOperands
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
Definition:ConstantFolding.cpp:1419
llvm::isSignBitCheck
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
Definition:ValueTracking.cpp:4481
llvm::Depth
@ Depth
Definition:SIMachineScheduler.h:36
llvm::canConstantFoldCallTo
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
Definition:ConstantFolding.cpp:1565
llvm::getMinMaxLimit
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
Definition:ValueTracking.cpp:9156
llvm::simplifySDivInst
Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
Definition:InstructionSimplify.cpp:1215
llvm::simplifyUnOp
Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
Definition:InstructionSimplify.cpp:6037
llvm::isDefaultFPEnvironment
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Definition:FPEnv.h:65
llvm::simplifyMulInst
Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
Definition:InstructionSimplify.cpp:957
llvm::IsConstantOffsetFromGlobal
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
Definition:ConstantFolding.cpp:299
llvm::simplifyInstructionWithOperands
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
Definition:InstructionSimplify.cpp:7226
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Definition:ValueTracking.cpp:6768
llvm::simplifyCall
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Definition:InstructionSimplify.cpp:7005
llvm::ConstantFoldCompareInstOperands
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
Definition:ConstantFolding.cpp:1186
llvm::isKnownToBeAPowerOfTwo
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
Definition:ValueTracking.cpp:280
llvm::canRoundingModeBe
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
Definition:FPEnv.h:77
llvm::isNoAliasCall
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Definition:AliasAnalysis.cpp:801
llvm::simplifyFCmpInst
Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
Definition:InstructionSimplify.cpp:4289
llvm::getSplatValue
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
Definition:VectorUtils.cpp:312
llvm::ConstantFoldGetElementPtr
Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
Definition:ConstantFold.cpp:1315
llvm::getMinMaxPred
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
Definition:ValueTracking.cpp:9105
llvm::simplifyShuffleVectorInst
Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5568
llvm::ConstantFoldCall
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Definition:ConstantFolding.cpp:3870
llvm::simplifyOrInst
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Definition:InstructionSimplify.cpp:2477
llvm::simplifyXorInst
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
Definition:InstructionSimplify.cpp:2562
llvm::getConstantRangeFromMetadata
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
Definition:ConstantRange.cpp:2264
llvm::computeConstantRange
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Definition:ValueTracking.cpp:10078
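A brief usage sketch for computeConstantRange (the wrapper function here is hypothetical): the returned ConstantRange can answer range questions such as non-negativity.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

// Sketch: true if V is provably non-negative when viewed as a signed integer.
static bool isProvablyNonNegative(const Value *V) {
  ConstantRange CR = computeConstantRange(V, /*ForSigned=*/true);
  return CR.isAllNonNegative();
}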
llvm::ConstantFoldExtractValueInstruction
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Definition:ConstantFold.cpp:505
llvm::isAllocLikeFn
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
Definition:MemoryBuiltins.cpp:313
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
Definition:ValueTracking.cpp:333
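A hedged sketch of MaskedValueIsZero (helper name and parameters are illustrative only), checking that the low NumBits of a value are known to be zero:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Sketch: assumes V has integer (or integer vector) type and NumBits fits in it.
static bool lowBitsKnownZero(const Value *V, unsigned NumBits,
                             const SimplifyQuery &SQ) {
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt Mask = APInt::getLowBitsSet(BitWidth, NumBits);
  return MaskedValueIsZero(V, Mask, SQ);
}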
llvm::simplifyCastInst
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5399
llvm::simplifyInstruction
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Definition:InstructionSimplify.cpp:7234
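A typical driver for simplifyInstruction looks roughly like the following hedged sketch (not code from this file; a real caller would also supply TLI, DT, and AC in the SimplifyQuery):

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// Sketch: replace instructions in BB that simplify to an existing value.
static void simplifyBlock(BasicBlock &BB, const DataLayout &DL) {
  SimplifyQuery Q(DL);
  for (Instruction &I : make_early_inc_range(BB)) {
    if (Value *V = simplifyInstruction(&I, Q.getWithInstruction(&I))) {
      I.replaceAllUsesWith(V); // forward uses to the simpler existing value
      I.eraseFromParent();     // the original instruction is now dead
    }
  }
}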
llvm::M1
unsigned M1(unsigned Val)
Definition:VE.h:376
llvm::simplifySubInst
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Definition:InstructionSimplify.cpp:885
llvm::simplifyAddInst
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Definition:InstructionSimplify.cpp:656
llvm::ConstantFoldConstant
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
Definition:ConstantFolding.cpp:1171
llvm::transform
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition:STLExtras.h:1952
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1746
llvm::getObjectSize
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Definition:MemoryBuiltins.cpp:578
llvm::isSplatValue
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Definition:VectorUtils.cpp:327
llvm::ConstantFoldLoadFromUniformValue
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
Definition:ConstantFolding.cpp:763
llvm::getInverseMinMaxFlavor
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
Definition:ValueTracking.cpp:9132
llvm::replaceAndRecursivelySimplify
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
Definition:InstructionSimplify.cpp:7310
llvm::ConstantFoldUnaryOpOperand
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Definition:ConstantFolding.cpp:1293
llvm::SelectPatternFlavor
SelectPatternFlavor
Specific patterns of select instructions we can match.
Definition:ValueTracking.h:1113
llvm::simplifyShlInst
Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Definition:InstructionSimplify.cpp:1429
llvm::simplifyFNegInst
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Definition:InstructionSimplify.cpp:5596
llvm::simplifyFSubInst
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
Definition:InstructionSimplify.cpp:5888
llvm::impliesPoison
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Definition:ValueTracking.cpp:7690
llvm::simplifyFRemInst
Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
Definition:InstructionSimplify.cpp:6001
llvm::simplifyFAddInst
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
Definition:InstructionSimplify.cpp:5880
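A short hedged sketch of calling simplifyFAddInst with explicit fast-math flags (the helper is illustrative; the default exception behavior and rounding mode are used):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/FMF.h"
using namespace llvm;

// Sketch: ask whether 'fadd nnan LHS, RHS' folds to an existing value.
static Value *tryFoldFAddNoNaNs(Value *LHS, Value *RHS, const SimplifyQuery &Q) {
  FastMathFlags FMF;
  FMF.setNoNaNs();
  return simplifyFAddInst(LHS, RHS, FMF, Q);
}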
llvm::ComplexDeinterleavingOperation::Splat
@ Splat
llvm::FPClassTest
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
Definition:FloatingPointMode.h:239
llvm::fcInf
@ fcInf
Definition:FloatingPointMode.h:254
llvm::fcNone
@ fcNone
Definition:FloatingPointMode.h:240
llvm::fcAllFlags
@ fcAllFlags
Definition:FloatingPointMode.h:264
llvm::fcNan
@ fcNan
Definition:FloatingPointMode.h:253
llvm::PointerMayBeCaptured
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Definition:CaptureTracking.cpp:204
llvm::simplifyLShrInst
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
Definition:InstructionSimplify.cpp:1466
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition:Function.cpp:1187
llvm::simplifyICmpInst
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
Definition:InstructionSimplify.cpp:4055
llvm::getVScaleRange
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Definition:ValueTracking.cpp:1058
llvm::ConstantFoldInstOperands
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Definition:ConstantFolding.cpp:1177
llvm::ConstantFoldCastOperand
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Definition:ConstantFolding.cpp:1462
llvm::simplifyAndInst
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
Definition:InstructionSimplify.cpp:2203
llvm::simplifyExtractValueInst
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5247
llvm::isNotCrossLaneOperation
bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
Definition:ValueTracking.cpp:7035
llvm::simplifyInsertValueInst
Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5177
llvm::ConstantFoldBinaryOpOperands
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Definition:ConstantFolding.cpp:1300
llvm::simplifyFDivInst
Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
Definition:InstructionSimplify.cpp:5963
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
Definition:ValueTracking.cpp:3487
llvm::PoisonMaskElem
constexpr int PoisonMaskElem
Definition:Instructions.h:1889
llvm::simplifyLoadInst
Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
Definition:InstructionSimplify.cpp:7054
llvm::simplifyFMAFMul
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
Definition:InstructionSimplify.cpp:5904
llvm::simplifyConstrainedFPCall
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
Definition:InstructionSimplify.cpp:7031
llvm::simplifyBinOp
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
Definition:InstructionSimplify.cpp:6116
llvm::isKnownNonEqual
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
Definition:ValueTracking.cpp:318
llvm::RecurKind::Or
@ Or
Bitwise or logical OR of integers.
llvm::decomposeBitTestICmp
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
Definition:CmpInstAnalysis.cpp:77
llvm::findScalarElement
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
Definition:VectorUtils.cpp:226
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition:ValueTracking.cpp:164
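A minimal sketch of the computeKnownBits overload documented above (helper name invented; assumes V has integer type):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Sketch: true if V is provably a multiple of 8, i.e. its three low bits are
// known to be zero.
static bool isKnownMultipleOf8(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 3;
}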
llvm::simplifyUDivInst
Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
Definition:InstructionSimplify.cpp:1227
llvm::Op
DWARFExpression::Operation Op
Definition:DWARFExpression.cpp:22
llvm::simplifyBinaryIntrinsic
Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
Definition:InstructionSimplify.cpp:6465
llvm::RoundingMode
RoundingMode
Rounding mode.
Definition:FloatingPointMode.h:37
llvm::isGuaranteedNotToBeUndefOrPoison
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Definition:ValueTracking.cpp:7841
llvm::M0
unsigned M0(unsigned Val)
Definition:VE.h:375
llvm::simplifyInsertElementInst
Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
Definition:InstructionSimplify.cpp:5183
llvm::BitWidth
constexpr unsigned BitWidth
Definition:BitmaskEnum.h:217
llvm::matchDecomposedSelectPattern
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
Definition:ValueTracking.cpp:9066
llvm::simplifyWithOpReplaced
Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
Definition:InstructionSimplify.cpp:4487
llvm::maskIsAllZeroOrUndef
bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be ...
Definition:VectorUtils.cpp:1123
llvm::fcmpToClassTest
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Definition:ValueTracking.cpp:4519
llvm::simplifySRemInst
Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
Definition:InstructionSimplify.cpp:1249
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Definition:ValueTracking.cpp:6815
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition:STLExtras.h:1903
llvm::computeKnownFPSignBit
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
Definition:ValueTracking.h:610
llvm::ComputeNumSignBits
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
Definition:ValueTracking.cpp:351
llvm::cannotBeNegativeZero
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
Definition:ValueTracking.h:561
llvm::all_equal
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition:STLExtras.h:2087
llvm::ConstantFoldInsertValueInstruction
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Definition:ConstantFold.cpp:517
llvm::ConstantFoldLoadFromConstPtr
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
Definition:ConstantFolding.cpp:735
llvm::isImpliedByDomCondition
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
Definition:ValueTracking.cpp:9680
llvm::simplifyCmpInst
Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
Definition:InstructionSimplify.cpp:6134
llvm::isGuaranteedNotToBePoison
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
Definition:ValueTracking.cpp:7849
llvm::computeKnownFPClass
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
Definition:ValueTracking.cpp:6169
llvm::isKnownNegation
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
Definition:ValueTracking.cpp:8571
llvm::ConstantFoldIntegerCast
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
Definition:ConstantFolding.cpp:1549
llvm::getBestSimplifyQuery
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
Definition:InstructionSimplify.cpp:7321
llvm::isCheckForZeroAndMulWithOverflow
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
Definition:OverflowInstAnalysis.cpp:21
llvm::canIgnoreSNaN
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Definition:FPEnv.h:83
llvm::simplifyURemInst
Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
Definition:InstructionSimplify.cpp:1260
llvm::simplifyExtractElementInst
Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
Definition:InstructionSimplify.cpp:5299
llvm::simplifySelectInst
Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
Definition:InstructionSimplify.cpp:4997
llvm::isImpliedCondition
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
Definition:ValueTracking.cpp:9596
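The std::optional<bool> result of isImpliedCondition is tri-state; a hedged sketch of how a caller might consume it (the helper is illustrative and assumes RHS is an i1 condition):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include <optional>
using namespace llvm;

// Sketch: if a dominating condition LHS (known true) decides RHS, return the
// corresponding i1 constant; otherwise return null.
static Value *foldImpliedCond(Value *LHS, Value *RHS, const DataLayout &DL) {
  if (std::optional<bool> Implied =
          isImpliedCondition(LHS, RHS, DL, /*LHSIsTrue=*/true))
    return ConstantInt::getBool(RHS->getType(), *Implied);
  return nullptr;
}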
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition:BitVector.h:860
N
#define N
llvm::CaptureTracker
This callback is used in conjunction with PointerMayBeCaptured.
Definition:CaptureTracking.h:83
llvm::CaptureTracker::tooManyUses
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
llvm::CaptureTracker::captured
virtual bool captured(const Use *U)=0
captured - Information about the pointer was captured by the user of use U.
llvm::Incoming
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Definition:SILowerI1Copies.h:25
llvm::InstrInfoQuery
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
Definition:SimplifyQuery.h:25
llvm::InstrInfoQuery::isExact
bool isExact(const BinaryOperator *Op) const
Definition:SimplifyQuery.h:48
llvm::InstrInfoQuery::getMetadata
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
Definition:SimplifyQuery.h:30
llvm::InstrInfoQuery::hasNoSignedWrap
bool hasNoSignedWrap(const InstT *Op) const
Definition:SimplifyQuery.h:42
llvm::InstrInfoQuery::UseInstrInfo
bool UseInstrInfo
Definition:SimplifyQuery.h:28
llvm::InstrInfoQuery::hasNoUnsignedWrap
bool hasNoUnsignedWrap(const InstT *Op) const
Definition:SimplifyQuery.h:36
llvm::KnownBits
Definition:KnownBits.h:23
llvm::KnownBits::isNonNegative
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition:KnownBits.h:100
llvm::KnownBits::isZero
bool isZero() const
Returns true if value is all zero.
Definition:KnownBits.h:79
llvm::KnownBits::countMinTrailingZeros
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition:KnownBits.h:234
llvm::KnownBits::countMaxTrailingZeros
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition:KnownBits.h:266
llvm::KnownBits::hasConflict
bool hasConflict() const
Returns true if there is conflicting information.
Definition:KnownBits.h:50
llvm::KnownBits::getBitWidth
unsigned getBitWidth() const
Get the bit width of this value.
Definition:KnownBits.h:43
llvm::KnownBits::countMaxActiveBits
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
Definition:KnownBits.h:288
llvm::KnownBits::countMinLeadingZeros
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition:KnownBits.h:240
llvm::KnownBits::getMaxValue
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition:KnownBits.h:137
llvm::KnownBits::getMinValue
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition:KnownBits.h:121
llvm::KnownBits::isNegative
bool isNegative() const
Returns true if this value is known to be negative.
Definition:KnownBits.h:97
llvm::KnownBits::One
APInt One
Definition:KnownBits.h:25
llvm::KnownBits::Zero
APInt Zero
Definition:KnownBits.h:24
llvm::KnownBits::shl
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
Definition:KnownBits.cpp:285
llvm::KnownFPClass
Definition:ValueTracking.h:261
llvm::KnownFPClass::isKnownAlwaysNaN
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
Definition:ValueTracking.h:290
llvm::KnownFPClass::OrderedLessThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
Definition:ValueTracking.h:350
llvm::KnownFPClass::SignBit
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
Definition:ValueTracking.h:267
llvm::KnownFPClass::isKnownNeverNaN
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
Definition:ValueTracking.h:285
llvm::KnownFPClass::isKnownNever
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
Definition:ValueTracking.h:274
llvm::KnownFPClass::cannotBeOrderedLessThanZero
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
Definition:ValueTracking.h:363
llvm::LoopStandardAnalysisResults
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Definition:LoopAnalysisManager.h:53
llvm::LoopStandardAnalysisResults::AC
AssumptionCache & AC
Definition:LoopAnalysisManager.h:55
llvm::LoopStandardAnalysisResults::TLI
TargetLibraryInfo & TLI
Definition:LoopAnalysisManager.h:59
llvm::LoopStandardAnalysisResults::DT
DominatorTree & DT
Definition:LoopAnalysisManager.h:56
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition:MemoryBuiltins.h:138
llvm::ObjectSizeOpts::NullIsUnknownSize
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Definition:MemoryBuiltins.h:162
llvm::ObjectSizeOpts::EvalMode
Mode EvalMode
How we want to evaluate this object's size.
Definition:MemoryBuiltins.h:155
llvm::SelectPatternResult::Flavor
SelectPatternFlavor Flavor
Definition:ValueTracking.h:1137
llvm::SelectPatternResult::isMinOrMax
static bool isMinOrMax(SelectPatternFlavor SPF)
'true' if the SelectPatternResult provided by matchSelectPattern is not 'SPF_UNKNOWN'.
Definition:ValueTracking.h:1145
llvm::SimplifyQuery
Definition:SimplifyQuery.h:70
llvm::SimplifyQuery::DL
const DataLayout & DL
Definition:SimplifyQuery.h:71
llvm::SimplifyQuery::CxtI
const Instruction * CxtI
Definition:SimplifyQuery.h:75
llvm::SimplifyQuery::CanUseUndef
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of un...
Definition:SimplifyQuery.h:87
llvm::SimplifyQuery::DT
const DominatorTree * DT
Definition:SimplifyQuery.h:73
llvm::SimplifyQuery::getWithInstruction
SimplifyQuery getWithInstruction(const Instruction *I) const
Definition:SimplifyQuery.h:107
llvm::SimplifyQuery::isUndefValue
bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
Definition:InstructionSimplify.cpp:7347
llvm::SimplifyQuery::AC
AssumptionCache * AC
Definition:SimplifyQuery.h:74
llvm::SimplifyQuery::TLI
const TargetLibraryInfo * TLI
Definition:SimplifyQuery.h:72
llvm::SimplifyQuery::getWithoutUndef
SimplifyQuery getWithoutUndef() const
Definition:SimplifyQuery.h:112
llvm::SimplifyQuery::IIQ
const InstrInfoQuery IIQ
Definition:SimplifyQuery.h:82
