@@ -81,12 +81,19 @@ where
81
81
/// the values inferred while solving the instantiated goal.
82
82
/// - `external_constraints`: additional constraints which aren't expressible
83
83
/// using simple unification of inference variables.
84
+ ///
85
+ /// This takes the `shallow_certainty` which represents whether we're confident
86
+ /// that the final result of the current goal only depends on the nested goals.
87
+ ///
88
+ /// In case this is `Certainty::Maybe`, there may still be additional nested goals
89
+ /// or inference constraints required for this candidate to hold. The candidate
90
+ /// always requires all already added constraints and nested goals.
84
91
#[ instrument( level = "trace" , skip( self ) , ret) ]
85
92
pub ( in crate :: solve) fn evaluate_added_goals_and_make_canonical_response (
86
93
& mut self ,
87
- certainty : Certainty ,
94
+ shallow_certainty : Certainty ,
88
95
) -> QueryResult < I > {
89
- self . inspect . make_canonical_response ( certainty ) ;
96
+ self . inspect . make_canonical_response ( shallow_certainty ) ;
90
97
91
98
let goals_certainty = self . try_evaluate_added_goals ( ) ?;
92
99
assert_eq ! (
@@ -103,26 +110,29 @@ where
103
110
NoSolution
104
111
} ) ?;
105
112
106
- // When normalizing, we've replaced the expected term with an unconstrained
107
- // inference variable. This means that we dropped information which could
108
- // have been important. We handle this by instead returning the nested goals
109
- // to the caller, where they are then handled.
110
- //
111
- // As we return all ambiguous nested goals, we can ignore the certainty returned
112
- // by `try_evaluate_added_goals()`.
113
- let ( certainty, normalization_nested_goals) = match self . current_goal_kind {
114
- CurrentGoalKind :: NormalizesTo => {
115
- let goals = std:: mem:: take ( & mut self . nested_goals ) ;
116
- if goals. is_empty ( ) {
117
- assert ! ( matches!( goals_certainty, Certainty :: Yes ) ) ;
113
+ let ( certainty, normalization_nested_goals) =
114
+ match ( self . current_goal_kind , shallow_certainty) {
115
+ // When normalizing, we've replaced the expected term with an unconstrained
116
+ // inference variable. This means that we dropped information which could
117
+ // have been important. We handle this by instead returning the nested goals
118
+ // to the caller, where they are then handled. We only do so if we do not
119
+ // need to recompute the `NormalizesTo` goal afterwards to avoid repeatedly
120
+ // uplifting its nested goals. This is the case if the `shallow_certainty` is
121
+ // `Certainty::Yes`.
122
+ ( CurrentGoalKind :: NormalizesTo , Certainty :: Yes ) => {
123
+ let goals = std:: mem:: take ( & mut self . nested_goals ) ;
124
+ // As we return all ambiguous nested goals, we can ignore the certainty
125
+ // returned by `self.try_evaluate_added_goals()`.
126
+ if goals. is_empty ( ) {
127
+ assert ! ( matches!( goals_certainty, Certainty :: Yes ) ) ;
128
+ }
129
+ ( Certainty :: Yes , NestedNormalizationGoals ( goals) )
118
130
}
119
- ( certainty, NestedNormalizationGoals ( goals) )
120
- }
121
- CurrentGoalKind :: Misc | CurrentGoalKind :: CoinductiveTrait => {
122
- let certainty = certainty. unify_with ( goals_certainty) ;
123
- ( certainty, NestedNormalizationGoals :: empty ( ) )
124
- }
125
- } ;
131
+ _ => {
132
+ let certainty = shallow_certainty. unify_with ( goals_certainty) ;
133
+ ( certainty, NestedNormalizationGoals :: empty ( ) )
134
+ }
135
+ } ;
126
136
127
137
if let Certainty :: Maybe ( cause @ MaybeCause :: Overflow { .. } ) = certainty {
128
138
// If we have overflow, it's probable that we're substituting a type
0 commit comments