5 changes: 2 additions & 3 deletions compiler/rustc_middle/src/ty/context.rs
@@ -3470,10 +3470,9 @@ impl<'tcx> TyCtxt<'tcx> {

     pub fn intrinsic(self, def_id: impl IntoQueryParam<DefId> + Copy) -> Option<ty::IntrinsicDef> {
         match self.def_kind(def_id) {
-            DefKind::Fn | DefKind::AssocFn => {}
-            _ => return None,
+            DefKind::Fn | DefKind::AssocFn => self.intrinsic_raw(def_id),
+            _ => None,
         }
-        self.intrinsic_raw(def_id)
     }
 
     pub fn next_trait_solver_globally(self) -> bool {
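The `intrinsic` change above folds the trailing `self.intrinsic_raw(def_id)` call into the match, so each arm yields the final `Option` directly. A minimal standalone sketch of the same before/after shape, using invented stand-in types (`Kind`, `lookup_raw`) rather than the real `TyCtxt`/`DefKind` APIs:

```rust
// Hypothetical stand-ins; the real code uses DefKind, TyCtxt::def_kind,
// and TyCtxt::intrinsic_raw.
#[derive(Clone, Copy)]
enum Kind {
    Fn,
    AssocFn,
    Other,
}

fn lookup_raw(id: u32) -> Option<String> {
    Some(format!("intrinsic-{id}"))
}

// Before: the match only filters, and the call happens after it.
fn lookup_before(kind: Kind, id: u32) -> Option<String> {
    match kind {
        Kind::Fn | Kind::AssocFn => {}
        _ => return None,
    }
    lookup_raw(id)
}

// After: every arm produces the final value, so the match is the whole body.
fn lookup_after(kind: Kind, id: u32) -> Option<String> {
    match kind {
        Kind::Fn | Kind::AssocFn => lookup_raw(id),
        _ => None,
    }
}

fn main() {
    assert_eq!(lookup_before(Kind::Fn, 7), lookup_after(Kind::Fn, 7));
    assert_eq!(lookup_before(Kind::AssocFn, 7), lookup_after(Kind::AssocFn, 7));
    assert_eq!(lookup_before(Kind::Other, 7), lookup_after(Kind::Other, 7));
}
```

Both forms behave the same; the refactored version just avoids the filter-then-fall-through pattern and lets the match expression be the function body.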
8 changes: 2 additions & 6 deletions compiler/rustc_middle/src/ty/util.rs
@@ -642,12 +642,8 @@ impl<'tcx> TyCtxt<'tcx> {
     /// has its own type-checking context or "inference environment".
     ///
     /// For example, a closure has its own `DefId`, but it is type-checked
-    /// with the containing item. Similarly, an inline const block has its
-    /// own `DefId` but it is type-checked together with the containing item.
-    ///
-    /// Therefore, when we fetch the
-    /// `typeck` the closure, for example, we really wind up
-    /// fetching the `typeck` the enclosing fn item.
+    /// with the containing item. Therefore, when we fetch the `typeck` of the closure,
+    /// for example, we really wind up fetching the `typeck` of the enclosing fn item.
     pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
         let mut def_id = def_id;
         while self.is_typeck_child(def_id) {
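The reworded doc comment describes what `typeck_root_def_id` does: a typeck child such as a closure is type-checked together with its enclosing item, so the method walks parent links until it reaches the item that owns the typeck results. A toy sketch of that walk, with an invented integer `DefId` and explicit maps standing in for the real definition tree and the `is_typeck_child` check:

```rust
use std::collections::HashMap;

// Invented representation for illustration only.
type DefId = u32;

struct DefTree {
    parent: HashMap<DefId, DefId>,
    typeck_child: HashMap<DefId, bool>,
}

impl DefTree {
    fn is_typeck_child(&self, id: DefId) -> bool {
        self.typeck_child.get(&id).copied().unwrap_or(false)
    }

    // Walk parent links while the current id is still a typeck child,
    // mirroring the loop in `typeck_root_def_id` above.
    fn typeck_root_def_id(&self, mut def_id: DefId) -> DefId {
        while self.is_typeck_child(def_id) {
            def_id = self.parent[&def_id];
        }
        def_id
    }
}

fn main() {
    // fn item 1 encloses closure 2, which encloses closure 3.
    let tree = DefTree {
        parent: HashMap::from([(2, 1), (3, 2)]),
        typeck_child: HashMap::from([(2, true), (3, true)]),
    };
    // Fetching the typeck root of either closure lands on the enclosing fn item.
    assert_eq!(tree.typeck_root_def_id(3), 1);
    assert_eq!(tree.typeck_root_def_id(2), 1);
    // An item that is not a typeck child is its own root.
    assert_eq!(tree.typeck_root_def_id(1), 1);
}
```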
38 changes: 25 additions & 13 deletions compiler/rustc_query_system/src/query/plumbing.rs
@@ -33,22 +33,34 @@ fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
     move |x| x.0 == *k
 }
 
+/// For a particular query, keeps track of "active" keys, i.e. keys whose
+/// evaluation has started but has not yet finished successfully.
+///
+/// (Successful query evaluation for a key is represented by an entry in the
+/// query's in-memory cache.)
 pub struct QueryState<'tcx, K> {
-    active: Sharded<hash_table::HashTable<(K, QueryResult<'tcx>)>>,
+    active: Sharded<hash_table::HashTable<(K, ActiveKeyStatus<'tcx>)>>,
 }
 
-/// Indicates the state of a query for a given key in a query map.
-enum QueryResult<'tcx> {
-    /// An already executing query. The query job can be used to await for its completion.
+/// For a particular query and key, tracks the status of a query evaluation
+/// that has started, but has not yet finished successfully.
+///
+/// (Successful query evaluation for a key is represented by an entry in the
+/// query's in-memory cache.)
+enum ActiveKeyStatus<'tcx> {
+    /// Some thread is already evaluating the query for this key.
+    ///
+    /// The enclosed [`QueryJob`] can be used to wait for it to finish.
     Started(QueryJob<'tcx>),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<'tcx> QueryResult<'tcx> {
-    /// Unwraps the query job expecting that it has started.
+impl<'tcx> ActiveKeyStatus<'tcx> {
+    /// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
+    /// was poisoned by a panic.
     fn expect_job(self) -> QueryJob<'tcx> {
         match self {
             Self::Started(job) => job,
@@ -76,9 +88,9 @@
     ) -> Option<()> {
         let mut active = Vec::new();
 
-        let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult<'tcx>)>>| {
+        let mut collect = |iter: LockGuard<'_, HashTable<(K, ActiveKeyStatus<'tcx>)>>| {
             for (k, v) in iter.iter() {
-                if let QueryResult::Started(ref job) = *v {
+                if let ActiveKeyStatus::Started(ref job) = *v {
                     active.push((*k, job.clone()));
                 }
             }
@@ -222,7 +234,7 @@
                 Err(_) => panic!(),
                 Ok(occupied) => {
                     let ((key, value), vacant) = occupied.remove();
-                    vacant.insert((key, QueryResult::Poisoned));
+                    vacant.insert((key, ActiveKeyStatus::Poisoned));
                     value.expect_job()
                 }
             }
@@ -319,7 +331,7 @@
                     let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
                     match shard.find(key_hash, equivalent_key(&key)) {
                         // The query we waited on panicked. Continue unwinding here.
-                        Some((_, QueryResult::Poisoned)) => FatalError.raise(),
+                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
                         _ => panic!(
                             "query '{}' result must be in the cache or the query must be poisoned after a wait",
                             query.name()
@@ -373,7 +385,7 @@
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert((key, QueryResult::Started(job)));
+            entry.insert((key, ActiveKeyStatus::Started(job)));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
@@ -382,7 +394,7 @@
         }
         Entry::Occupied(mut entry) => {
             match &mut entry.get_mut().1 {
-                QueryResult::Started(job) => {
+                ActiveKeyStatus::Started(job) => {
                     if sync::is_dyn_thread_safe() {
                         // Get the latch out
                         let latch = job.latch();
@@ -400,7 +412,7 @@
                     // so we just return the error.
                     cycle_error(query, qcx, id, span)
                 }
-                QueryResult::Poisoned => FatalError.raise(),
+                ActiveKeyStatus::Poisoned => FatalError.raise(),
             }
         }
     }
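The plumbing.rs changes rename `QueryResult` to `ActiveKeyStatus` and document that an entry exists for a key only while its evaluation is in flight, either running (`Started`) or poisoned by a panic. A simplified, single-threaded sketch of that status tracking, with an invented `Job` type and a plain map in place of `QueryJob` and the sharded hash tables:

```rust
use std::collections::HashMap;

// Invented stand-in for QueryJob; just enough to have something to hand to a waiter.
#[derive(Debug, Clone)]
struct Job {
    id: u64,
}

// Simplified version of the renamed enum: a key under evaluation is either
// Started (holding a job handle waiters could use) or Poisoned (the
// evaluation panicked and its result will never reach the cache).
#[derive(Debug)]
enum ActiveKeyStatus {
    Started(Job),
    Poisoned,
}

impl ActiveKeyStatus {
    // Obtains the enclosed Job, or panics if the evaluation was poisoned,
    // mirroring the expect_job helper above.
    fn expect_job(self) -> Job {
        match self {
            Self::Started(job) => job,
            Self::Poisoned => panic!("tried to wait on a poisoned query"),
        }
    }
}

fn main() {
    let mut active: HashMap<&str, ActiveKeyStatus> = HashMap::new();

    // Starting an evaluation records a Started entry for the key.
    active.insert("key-a", ActiveKeyStatus::Started(Job { id: 1 }));

    // If the producer panicked, the entry would be swapped to Poisoned so
    // later waiters can tell the result is never going to land in the cache.
    active.insert("key-b", ActiveKeyStatus::Poisoned);

    let job = active.remove("key-a").unwrap().expect_job();
    println!("waiting on job {}", job.id);

    // This would panic, mirroring the fatal-error path in the real code:
    // active.remove("key-b").unwrap().expect_job();
}
```

Keeping a `Poisoned` marker in the active map, instead of simply removing the entry, is what lets a waiter distinguish "the result is in the cache" from "the producer panicked", which is exactly what the "must be in the cache or the query must be poisoned after a wait" panic path above relies on.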