llvm.org GIT mirror llvm / 430b8a2
Fix several const-correctness issues, resolving some -Wcast-qual warnings. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@54349 91177308-0d34-0410-b5e6-96231b3b80d8 Dan Gohman 11 years ago
4 changed file(s) with 17 addition(s) and 16 deletion(s). Raw diff Collapse all Expand all
8787 void Schedule();
8888
8989 /// IsReachable - Checks if SU is reachable from TargetSU.
90 bool IsReachable(SUnit *SU, SUnit *TargetSU);
90 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
9191
9292 /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
9393 /// create a cycle.
154154 /// DFS - make a DFS traversal and mark all nodes affected by the
155155 /// edge insertion. These nodes will later get new topological indexes
156156 /// by means of the Shift method.
157 void DFS(SUnit *SU, int UpperBound, bool& HasLoop);
157 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
158158
159159 /// Shift - reassign topological indexes for the nodes in the DAG
160160 /// to preserve the topological ordering.
394394 }
395395
396396 /// IsReachable - Checks if SU is reachable from TargetSU.
397 bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
397 bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
398398 // If insertion of the edge SU->TargetSU would create a cycle
399399 // then there is a path from TargetSU to SU.
400400 int UpperBound, LowerBound;
542542 /// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
543543 /// all nodes affected by the edge insertion. These nodes will later get new
544544 /// topological indexes by means of the Shift method.
545 void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
546 std::vector<SUnit*> WorkList;
545 void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
546 std::vector<const SUnit*> WorkList;
547547 WorkList.reserve(SUnits.size());
548548
549549 WorkList.push_back(SU);
14021402 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
14031403 : public RegReductionPriorityQueue<SF> {
14041404 // SUnits - The SUnits for the current graph.
1405 const std::vector<SUnit> *SUnits;
1405 std::vector<SUnit> *SUnits;
14061406
14071407 // SethiUllmanNumbers - The SethiUllman number for each node.
14081408 std::vector<unsigned> SethiUllmanNumbers;
16911691
16921692 /// hasCopyToRegUse - Return true if SU has a value successor that is a
16931693 /// CopyToReg node.
1694 static bool hasCopyToRegUse(SUnit *SU) {
1694 static bool hasCopyToRegUse(const SUnit *SU) {
16951695 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
16961696 I != E; ++I) {
16971697 if (I->isCtrl) continue;
1698 SUnit *SuccSU = I->Dep;
1698 const SUnit *SuccSU = I->Dep;
16991699 if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
17001700 return true;
17011701 }
17041704
17051705 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
17061706 /// physical register defs.
1707 static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
1707 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
17081708 const TargetInstrInfo *TII,
17091709 const TargetRegisterInfo *TRI) {
17101710 SDNode *N = SuccSU->Node;
17381738 /// commutable, favor the one that's not commutable.
17391739 void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
17401740 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1741 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1741 SUnit *SU = &(*SUnits)[i];
17421742 if (!SU->isTwoAddress)
17431743 continue;
17441744
18181818 unsigned Sum = 0;
18191819 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
18201820 I != E; ++I) {
1821 SUnit *SuccSU = I->Dep;
1821 const SUnit *SuccSU = I->Dep;
18221822 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
18231823 EE = SuccSU->Preds.end(); II != EE; ++II) {
18241824 SUnit *PredSU = II->Dep;
5757 Grow();
5858
5959 // Okay, we know we have space. Find a hash bucket.
60 void **Bucket = const_cast<void**>(FindBucketFor((void*)Ptr));
60 const void **Bucket = const_cast<const void**>(FindBucketFor(Ptr));
6161 if (*Bucket == Ptr) return false; // Already inserted, good.
6262
6363 // Otherwise, insert it!
6464 if (*Bucket == getTombstoneMarker())
6565 --NumTombstones;
66 *Bucket = (void*)Ptr;
66 *Bucket = Ptr;
6767 ++NumElements; // Track density.
6868 return true;
6969 }
10691069 }
10701070
10711071 typename MapTy::iterator I =
1072 Map.find(MapKey((TypeClass*)CP->getRawType(), getValType(CP)));
1072 Map.find(MapKey(static_cast<const TypeClass*>(CP->getRawType()),
1073 getValType(CP)));
10731074 if (I == Map.end() || I->second != CP) {
10741075 // FIXME: This should not use a linear scan. If this gets to be a
10751076 // performance problem, someone should look at this.
8383 // Now call the destructor for the subclass directly because we're going
8484 // to delete this as an array of char.
8585 if (isa<FunctionType>(this))
86 ((FunctionType*)this)->FunctionType::~FunctionType();
86 static_cast<const FunctionType*>(this)->FunctionType::~FunctionType();
8787 else
88 ((StructType*)this)->StructType::~StructType();
88 static_cast<const StructType*>(this)->StructType::~StructType();
8989
9090 // Finally, remove the memory as an array deallocation of the chars it was
9191 // constructed from.