diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index 372d05809401ff..2b907b2e3d1c24 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,9 +18,11 @@ import (
 	"cmd/compile/internal/types"
 )
 
+const go125ImprovedConcreteTypeAnalysis = true
+
 // StaticCall devirtualizes the given call if possible when the concrete callee
 // is available statically.
-func StaticCall(call *ir.CallExpr) {
+func StaticCall(s *State, call *ir.CallExpr) {
 	// For promoted methods (including value-receiver methods promoted
 	// to pointer-receivers), the interface method wrapper may contain
 	// expressions that can panic (e.g., ODEREF, ODOTPTR,
@@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) {
 	}
 
 	sel := call.Fun.(*ir.SelectorExpr)
-	r := ir.StaticValue(sel.X)
-	if r.Op() != ir.OCONVIFACE {
-		return
-	}
-	recv := r.(*ir.ConvExpr)
+	var typ *types.Type
+	if go125ImprovedConcreteTypeAnalysis {
+		typ = concreteType(s, sel.X)
+		if typ == nil {
+			return
+		}
 
-	typ := recv.X.Type()
-	if typ.IsInterface() {
-		return
+		// Don't try to devirtualize calls that we statically know would have failed at runtime.
+		// This can happen in a case like any(0).(interface{ A() }).A(), which typechecks without
+		// any errors, but will cause a runtime panic. We statically know that int(0) does not
+		// implement that interface, so we skip the devirtualization, as it is not possible
+		// to insert the assertion any(0).(interface{ A() }).(int) (int does not implement interface{ A() }).
+		if !typecheck.Implements(typ, sel.X.Type()) {
+			return
+		}
+	} else {
+		r := ir.StaticValue(sel.X)
+		if r.Op() != ir.OCONVIFACE {
+			return
+		}
+		recv := r.(*ir.ConvExpr)
+		typ = recv.X.Type()
+		if typ.IsInterface() {
+			return
+		}
 	}
 
 	// If typ is a shape type, then it was a type argument originally
@@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) {
 		return
 	}
 
-	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
-	dt.SetType(typ)
+	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ)
+
+	if go125ImprovedConcreteTypeAnalysis {
+		// Consider:
+		//
+		//	var v Iface
+		//	v.A()
+		//	v = &Impl{}
+		//
+		// Here in the devirtualizer, we determine the concrete type of v to be *Impl,
+		// but v can still be a nil interface; we have not detected that. The v.(*Impl)
+		// type assertion that we insert here would also fail, but with a different
+		// panic, "pkg.Iface is nil, not *pkg.Impl", where previously we would get a nil panic.
+		// We fix this by introducing an additional nil check on the itab.
+		// Calling a method on a nil interface is (in most cases) a bug in a program, so it is fine
+		// to devirtualize and further (possibly) inline them, even though we would never reach
+		// the called function.
+		dt.UseNilPanic = true
+		dt.SetPos(call.Pos())
+	}
+
 	x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
 	switch x.Op() {
 	case ir.ODOTMETH:
@@ -138,3 +175,405 @@ func StaticCall(call *ir.CallExpr) {
 	// Desugar OCALLMETH, if we created one (#57309).
 	typecheck.FixMethodCall(call)
 }
+
+const concreteTypeDebug = false
+
+// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts.
+// Returns nil when the concrete type could not be determined, or when there are multiple
+// (different) types assigned to an interface.
+func concreteType(s *State, n ir.Node) (typ *types.Type) {
+	typ = concreteType1(s, n, make(map[*ir.Name]struct{}))
+	if typ != nil && typ.IsInterface() {
+		base.Fatalf("typ.IsInterface() = true; want = false; typ = %v", typ)
+	}
+	if typ == &noType {
+		return nil
+	}
+	return typ
+}
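+
+// For example (a hypothetical sketch, assuming *Impl and *Impl2 both
+// implement Iface):
+//
+//	var a Iface = &Impl{}
+//	a = &Impl{}  // concreteType(s, a) == *Impl: all assignments agree.
+//
+//	var b Iface = &Impl{}
+//	b = &Impl2{} // concreteType(s, b) == nil: assignments disagree.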
+
+// noType is a sentinel value returned by [concreteType1].
+var noType types.Type
+
+// concreteType1 analyzes the node n and returns its concrete type if it is statically known.
+// Otherwise, it returns a nil Type, indicating that a concrete type was not determined.
+// When n is known to be statically nil or a self-assignment is detected, it returns the sentinel [noType] type instead.
+func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) {
+	nn := n // for debug messages
+
+	if concreteTypeDebug {
+		defer func() {
+			t := "&noType"
+			if outT != &noType {
+				t = outT.String()
+			}
+			base.Warn("concreteType1(%v) -> %v", nn, t)
+		}()
+	}
+
+	for {
+		if concreteTypeDebug {
+			base.Warn("concreteType1(%v): analyzing %v", nn, n)
+		}
+
+		if !n.Type().IsInterface() {
+			return n.Type()
+		}
+
+		switch n1 := n.(type) {
+		case *ir.ConvExpr:
+			if n1.Op() == ir.OCONVNOP {
+				if !n1.Type().IsInterface() || !types.Identical(n1.Type(), n1.X.Type()) {
+					// Since we check (directly before this switch) whether n is an interface, we should only reach
+					// here for interface-to-interface conversions where both types are identical.
+					base.Fatalf("not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type())
+				}
+				n = n1.X
+				continue
+			}
+			if n1.Op() == ir.OCONVIFACE {
+				n = n1.X
+				continue
+			}
+		case *ir.InlinedCallExpr:
+			if n1.Op() == ir.OINLCALL {
+				n = n1.SingleResult()
+				continue
+			}
+		case *ir.ParenExpr:
+			n = n1.X
+			continue
+		case *ir.TypeAssertExpr:
+			n = n1.X
+			continue
+		}
+		break
+	}
+
+	if n.Op() != ir.ONAME {
+		return nil
+	}
+
+	name := n.(*ir.Name).Canonical()
+	if name.Class != ir.PAUTO {
+		return nil
+	}
+
+	if name.Op() != ir.ONAME {
+		base.Fatalf("reassigned %v", name)
+	}
+
+	// name.Curfn must be set, since we know that name.Class == ir.PAUTO (checked above).
+	if name.Curfn == nil {
+		base.Fatalf("name.Curfn = nil; want not nil")
+	}
+
+	if name.Addrtaken() {
+		return nil // conservatively assume it's reassigned with a different type indirectly
+	}
+
+	if _, ok := seen[name]; ok {
+		// Self assignment; treat it the same as a nil assignment.
+		// If this is the only assignment, then we are not going to devirtualize anything.
+		// If there are other assignments, we still preserve the correct type.
+		return &noType
+	}
+	seen[name] = struct{}{}
+
+	if concreteTypeDebug {
+		base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name)
+	}
+
+	var typ *types.Type
+	for _, v := range s.assignments(name) {
+		var t *types.Type
+		switch v := v.(type) {
+		case *types.Type:
+			t = v
+		case ir.Node:
+			t = concreteType1(s, v, seen)
+			if t == &noType {
+				continue
+			}
+		}
+		if t == nil || (typ != nil && !types.Identical(typ, t)) {
+			return nil
+		}
+		typ = t
+	}
+
+	delete(seen, name)
+
+	if typ == nil {
+		// Variable either declared with zero value, or only assigned with nil.
+		return &noType
+	}
+
+	return typ
+}
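+
+// For example (hypothetical), a cycle of self assignments still resolves,
+// because the seen map makes the recursion treat the cycle like a nil
+// assignment:
+//
+//	var a, b Iface
+//	b = &Impl{}
+//	a = b
+//	b = a // concreteType1 returns &noType for this assignment of b
+//	a.M() // still devirtualized to (*Impl).M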
+
+// assignment can be one of:
+// - nil - assignment of a value of interface (non-concrete) type.
+// - *types.Type - assignment of a value of concrete (non-interface) type.
+// - ir.Node - assignment of an ir.Node.
+//
+// In most cases an assignment should be an [ir.Node], but in cases where we
+// do not follow the data flow, we record either a concrete type (*types.Type) or nil.
+// For example, in a range over a slice, if the slice element is of an interface type we record
+// nil, and otherwise the element's concrete type (we do so because we do not analyze assignments
+// to the slice being ranged over).
+type assignment any
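+
+// For example (illustrative only):
+//
+//	var v Iface
+//	v = &Impl{}            // recorded as the ir.Node for &Impl{}
+//	v = g()                // interface-typed call result: recorded as nil
+//	for v = range impls {} // impls []*Impl: recorded as the *types.Type *Impl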
+
+// State holds precomputed state for use in [StaticCall].
+type State struct {
+	// ifaceAssignments maps interface variables to all their assignments
+	// defined inside functions stored in the analyzedFuncs set.
+	// Note: it does not include direct assignments to nil.
+	ifaceAssignments map[*ir.Name][]assignment
+
+	// ifaceCallExprAssigns stores every [*ir.CallExpr] that has an interface
+	// result assigned to a variable.
+	ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef
+
+	// analyzedFuncs is a set of Funcs that were analyzed for iface assignments.
+	analyzedFuncs map[*ir.Func]struct{}
+}
+
+type ifaceAssignRef struct {
+	name           *ir.Name // ifaceAssignments[name]
+	valOrTypeIndex int      // ifaceAssignments[name][valOrTypeIndex]
+	returnIndex    int      // (*ir.CallExpr).Result(returnIndex)
+}
+
+// InlinedCall updates the [State] to take into account a newly inlined call.
+func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) {
+	if _, ok := s.analyzedFuncs[fun]; !ok {
+		// Full analysis has not yet been executed for the provided function, so we can skip it for now.
+		// When no devirtualization happens in a function, there is no need to analyze it.
+		return
+	}
+
+	// Analyze assignments in the newly inlined function.
+	s.analyze(inlinedCall.Init())
+	s.analyze(inlinedCall.Body)
+
+	refs, ok := s.ifaceCallExprAssigns[origCall]
+	if !ok {
+		return
+	}
+	delete(s.ifaceCallExprAssigns, origCall)
+
+	// Update assignments to reference the new ReturnVars of the inlined call.
+	for _, ref := range refs {
+		vt := &s.ifaceAssignments[ref.name][ref.valOrTypeIndex]
+		if *vt != nil {
+			base.Fatalf("unexpected non-nil assignment")
+		}
+		if concreteTypeDebug {
+			base.Warn(
+				"InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)",
+				origCall, inlinedCall, ref.name, ref.valOrTypeIndex,
+				inlinedCall.ReturnVars[ref.returnIndex],
+				inlinedCall.ReturnVars[ref.returnIndex].Type(),
+			)
+		}
+		*vt = inlinedCall.ReturnVars[ref.returnIndex]
+	}
+}
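+
+// For example (an illustrative sketch), given
+//
+//	var v Iface = f()
+//
+// analyze records a nil assignment (an interface-typed call result is not
+// concrete) and remembers the call in ifaceCallExprAssigns. Once f gets
+// inlined, InlinedCall replaces that nil placeholder with the inlined call's
+// ReturnVars node, so that concreteType can follow the data flow into f's body.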
+
+// assignments returns all assignments to n.
+func (s *State) assignments(n *ir.Name) []assignment {
+	fun := n.Curfn
+	if fun == nil {
+		base.Fatalf("n.Curfn = <nil>")
+	}
+
+	if !n.Type().IsInterface() {
+		base.Fatalf("name passed to assignments is not of an interface type: %v", n.Type())
+	}
+
+	// Analyze assignments in func, if not analyzed before.
+	if _, ok := s.analyzedFuncs[fun]; !ok {
+		if concreteTypeDebug {
+			base.Warn("concreteType(): analyzing assignments in %v func", fun)
+		}
+		if s.analyzedFuncs == nil {
+			s.ifaceAssignments = make(map[*ir.Name][]assignment)
+			s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef)
+			s.analyzedFuncs = make(map[*ir.Func]struct{})
+		}
+		s.analyzedFuncs[fun] = struct{}{}
+		s.analyze(fun.Init())
+		s.analyze(fun.Body)
+	}
+
+	return s.ifaceAssignments[n]
+}
+
+// analyze analyzes every assignment to interface variables in nodes, updating [State].
+func (s *State) analyze(nodes ir.Nodes) {
+	assign := func(name ir.Node, assignment assignment) (*ir.Name, int) {
+		if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) {
+			return nil, -1
+		}
+
+		n, ok := ir.OuterValue(name).(*ir.Name)
+		if !ok || n.Curfn == nil {
+			return nil, -1
+		}
+
+		// Do not track variables that are not of interface types.
+		// For devirtualization they are unnecessary; we will not even look them up.
+		if !n.Type().IsInterface() {
+			return nil, -1
+		}
+
+		n = n.Canonical()
+		if n.Op() != ir.ONAME {
+			base.Fatalf("reassigned %v", n)
+		}
+
+		switch a := assignment.(type) {
+		case nil:
+		case *types.Type:
+			if a != nil && a.IsInterface() {
+				assignment = nil // non-concrete type
+			}
+		case ir.Node:
+			// A nil assignment; we can safely ignore it, see [StaticCall].
+			if ir.IsNil(a) {
+				return nil, -1
+			}
+		default:
+			base.Fatalf("unexpected type: %v", assignment)
+		}
+
+		if concreteTypeDebug {
+			base.Warn("analyze(): assignment found %v = %v", name, assignment)
+		}
+
+		s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment)
+		return n, len(s.ifaceAssignments[n]) - 1
+	}
+
+	var do func(n ir.Node)
+	do = func(n ir.Node) {
+		switch n.Op() {
+		case ir.OAS:
+			n := n.(*ir.AssignStmt)
+			if n.Y != nil {
+				rhs := n.Y
+				for {
+					if r, ok := rhs.(*ir.ParenExpr); ok {
+						rhs = r.X
+						continue
+					}
+					break
+				}
+				if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil {
+					retTyp := call.Fun.Type().Results()[0].Type
+					n, idx := assign(n.X, retTyp)
+					if n != nil && retTyp.IsInterface() {
+						s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0})
+					}
+				} else {
+					assign(n.X, rhs)
+				}
+			}
+		case ir.OAS2:
+			n := n.(*ir.AssignListStmt)
+			for i, p := range n.Lhs {
+				if n.Rhs[i] != nil {
+					assign(p, n.Rhs[i])
+				}
+			}
+		case ir.OAS2DOTTYPE:
+			n := n.(*ir.AssignListStmt)
+			if n.Rhs[0] == nil {
+				base.Fatalf("n.Rhs[0] == nil; n = %v", n)
+			}
+			assign(n.Lhs[0], n.Rhs[0])
+			assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+		case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			if n.Rhs[0] == nil {
+				base.Fatalf("n.Rhs[0] == nil; n = %v", n)
+			}
+			assign(n.Lhs[0], n.Rhs[0].Type())
+			assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+		case ir.OAS2FUNC:
+			n := n.(*ir.AssignListStmt)
+			rhs := n.Rhs[0]
+			for {
+				if r, ok := rhs.(*ir.ParenExpr); ok {
+					rhs = r.X
+					continue
+				}
+				break
+			}
+			if call, ok := rhs.(*ir.CallExpr); ok {
+				for i, p := range n.Lhs {
+					retTyp := call.Fun.Type().Results()[i].Type
+					n, idx := assign(p, retTyp)
+					if n != nil && retTyp.IsInterface() {
+						s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i})
+					}
+				}
+			} else if call, ok := rhs.(*ir.InlinedCallExpr); ok {
+				for i, p := range n.Lhs {
+					assign(p, call.ReturnVars[i])
+				}
+			} else {
+				// TODO: can we reach here?
+				for _, p := range n.Lhs {
+					assign(p, nil)
+				}
+			}
+		case ir.ORANGE:
+			n := n.(*ir.RangeStmt)
+			xTyp := n.X.Type()
+
+			// Range over an array pointer.
+			if xTyp.IsPtr() && xTyp.Elem().IsArray() {
+				xTyp = xTyp.Elem()
+			}
+
+			if xTyp.IsArray() || xTyp.IsSlice() {
+				assign(n.Key, nil) // integer does not have methods to devirtualize
+				assign(n.Value, xTyp.Elem())
+			} else if xTyp.IsChan() {
+				assign(n.Key, xTyp.Elem())
+				base.Assertf(n.Value == nil, "n.Value != nil in range over chan")
+			} else if xTyp.IsMap() {
+				assign(n.Key, xTyp.Key())
+				assign(n.Value, xTyp.Elem())
+			} else if xTyp.IsInteger() || xTyp.IsString() {
+				// Range over int/string, results do not have methods, so nothing to devirtualize.
+				assign(n.Key, nil)
+				assign(n.Value, nil)
+			} else {
+				// We will not reach here in the case of a range-over-func, as it is
+				// rewritten to function calls in the noder package.
+				base.Fatalf("range over unexpected type %v", n.X.Type())
+			}
+		case ir.OSWITCH:
+			n := n.(*ir.SwitchStmt)
+			if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+				for _, v := range n.Cases {
+					if v.Var == nil {
+						base.Assert(guard.Tag == nil)
+						continue
+					}
+					assign(v.Var, guard.X)
+				}
+			}
+		case ir.OCLOSURE:
+			n := n.(*ir.ClosureExpr)
+			if _, ok := s.analyzedFuncs[n.Func]; !ok {
+				s.analyzedFuncs[n.Func] = struct{}{}
+				ir.Visit(n.Func, do)
+			}
+		}
+	}
+	ir.VisitList(nodes, do)
+}
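
To see what the new analysis enables end to end, consider a small program (an
illustrative sketch; the authoritative cases are in test/devirtualization.go
below) that the previous OCONVIFACE-based check could not devirtualize, since
the interface variable is assigned more than once:

	package main

	type Iface interface{ M() }

	type Impl struct{}

	func (*Impl) M() {}

	func main() {
		var v Iface
		v = &Impl{}
		v.M() // now devirtualized to v.(*Impl).M()
		v = &Impl{}
		v.M() // still devirtualized: every assignment agrees on *Impl
	}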
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
index a35121517ac001..b74c3cb72d9c15 100644
--- a/src/cmd/compile/internal/inline/interleaved/interleaved.go
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
 	inlState := make(map[*ir.Func]*inlClosureState)
 	calleeUseCounts := make(map[*ir.Func]int)
 
+	var state devirtualize.State
+
 	// Pre-process all the functions, adding parentheses around call sites and starting their "inl state".
 	for _, fn := range typecheck.Target.Funcs {
 		bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
@@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
 
 		// Do a first pass at counting call sites.
 		for i := range s.parens {
-			s.resolve(i)
+			s.resolve(&state, i)
 		}
 	}
 
@@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
 					for {
 						for i := l0; i < l1; i++ { // can't use "range parens" here
 							paren := s.parens[i]
-							if new := s.edit(i); new != nil {
+							if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
 								// Update AST and recursively mark nodes.
-								paren.X = new
-								ir.EditChildren(new, s.mark) // mark may append to parens
+								paren.X = inlinedCall
+								ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+								state.InlinedCall(s.fn, origCall, inlinedCall)
 								done = false
 							}
 						}
@@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
 							break
 						}
 						for i := l0; i < l1; i++ {
-							s.resolve(i)
+							s.resolve(&state, i)
 						}
 
 					}
@@ -188,7 +191,7 @@ type inlClosureState struct {
 // resolve attempts to resolve a call to a potentially inlineable callee
 // and updates use counts on the callees.  Returns the call site count
 // for that callee.
-func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
+func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) {
 	p := s.parens[i]
 	if i < len(s.resolved) {
 		if callee := s.resolved[i]; callee != nil {
@@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
 	if !ok { // previously inlined
 		return nil, -1
 	}
-	devirtualize.StaticCall(call)
+	devirtualize.StaticCall(state, call)
 	if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil {
 		for len(s.resolved) <= i {
 			s.resolved = append(s.resolved, nil)
@@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
 	return nil, 0
 }
 
-func (s *inlClosureState) edit(i int) ir.Node {
+func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) {
 	n := s.parens[i].X
 	call, ok := n.(*ir.CallExpr)
 	if !ok {
-		return nil
+		return nil, nil
 	}
 	// This is redundant with earlier calls to
 	// resolve, but because things can change it
 	// must be re-checked.
-	callee, count := s.resolve(i)
+	callee, count := s.resolve(state, i)
 	if count <= 0 {
-		return nil
+		return nil, nil
 	}
 	if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil {
-		return inlCall
+		return call, inlCall
 	}
-	return nil
+	return nil, nil
 }
 
 // Mark inserts parentheses, and is called repeatedly.
@@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() {
 // returns.
 func (s *inlClosureState) fixpoint() bool {
 	changed := false
+	var state devirtualize.State
 	ir.WithFunc(s.fn, func() {
 		done := false
 		for !done {
 			done = true
 			for i := 0; i < len(s.parens); i++ { // can't use "range parens" here
 				paren := s.parens[i]
-				if new := s.edit(i); new != nil {
+				if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
 					// Update AST and recursively mark nodes.
-					paren.X = new
-					ir.EditChildren(new, s.mark) // mark may append to parens
+					paren.X = inlinedCall
+					ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+					state.InlinedCall(s.fn, origCall, inlinedCall)
 					done = false
 					changed = true
 				}
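
The interleaved pass now threads a single devirtualize.State through resolve
and edit, so the assignment analysis is computed lazily (once per analyzed
function) and patched incrementally as inlining rewrites calls. A usage sketch
of the API added above:

	var state devirtualize.State
	// Lazily analyzes the function enclosing call, then devirtualizes the
	// call if the receiver's concrete type is statically known.
	devirtualize.StaticCall(&state, call)
	// After inlining replaces origCall with inlinedCall, re-link the recorded
	// interface assignments to the inlined call's ReturnVars.
	state.InlinedCall(fn, origCall, inlinedCall)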
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 5bd26fc14562f6..dd3e291de8e6ab 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -677,6 +677,11 @@ type TypeAssertExpr struct {
 
 	// An internal/abi.TypeAssert descriptor to pass to the runtime.
 	Descriptor *obj.LSym
+
+	// When set to true, if this assert would panic, then use a nil pointer panic
+	// instead of an interface conversion panic.
+	// It must not be set for type asserts using the commaok form.
+	UseNilPanic bool
 }
 
 func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index eca66487fa26da..bdfef70f216527 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -2941,6 +2941,7 @@ func (r *reader) multiExpr() []ir.Node {
 		as.Def = true
 		for i := range results {
 			tmp := r.temp(pos, r.typ())
+			tmp.Defn = as
 			as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp))
 			as.Lhs.Append(tmp)
 
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 07269e65f2fdda..8f9b83e93af7c0 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -5642,6 +5642,25 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
 	if n.ITab != nil {
 		targetItab = s.expr(n.ITab)
 	}
+
+	if n.UseNilPanic {
+		if commaok {
+			base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && commaok == true")
+		}
+		if n.Type().IsInterface() {
+			// Currently we do not expect the compiler to emit type asserts with UseNilPanic that assert to an interface type.
+			// If needed, this can be relaxed in the future, but for now we can assert that.
+			base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && Type().IsInterface() == true")
+		}
+		typs := s.f.Config.Types
+		iface = s.newValue2(
+			ssa.OpIMake,
+			iface.Type,
+			s.nilCheck(s.newValue1(ssa.OpITab, typs.BytePtr, iface)),
+			s.newValue1(ssa.OpIData, typs.BytePtr, iface),
+		)
+	}
+
 	return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
 }
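
At the source level, the itab nil check emitted above preserves the panic the
program would have produced without devirtualization. A hypothetical
demonstration (not part of this change):

	package main

	import "fmt"

	type Iface interface{ M() }

	type Impl struct{}

	func (*Impl) M() {}

	func main() {
		defer func() {
			// Prints a nil pointer dereference runtime error, both for the
			// virtual call and for the devirtualized v.(*Impl).M() with
			// UseNilPanic set; without the itab nil check the devirtualized
			// form would instead panic with "interface conversion:
			// main.Iface is nil, not *main.Impl".
			fmt.Println(recover())
		}()
		var v Iface
		v.M()
		v = &Impl{} // the later assignment lets the compiler resolve *Impl
	}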
 
diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go
index b3b4e77f578e60..1117c9903c3b6d 100644
--- a/src/crypto/sha256/sha256_test.go
+++ b/src/crypto/sha256/sha256_test.go
@@ -406,3 +406,17 @@ func BenchmarkHash1K(b *testing.B) {
 func BenchmarkHash8K(b *testing.B) {
 	benchmarkSize(b, 8192)
 }
+
+func TestAllocationsWithTypeAsserts(t *testing.T) {
+	cryptotest.SkipTestAllocations(t)
+	allocs := testing.AllocsPerRun(100, func() {
+		h := New()
+		h.Write([]byte{1, 2, 3})
+		marshaled, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
+		marshaled, _ = h.(encoding.BinaryAppender).AppendBinary(marshaled[:0])
+		h.(encoding.BinaryUnmarshaler).UnmarshalBinary(marshaled)
+	})
+	if allocs != 0 {
+		t.Fatalf("allocs = %v; want = 0", allocs)
+	}
+}
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index bba66ba48fea35..276bb901a6f9c7 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -348,6 +348,11 @@ func (h inlineWrapper) dump(pcs []uintptr) {
 
 func inlinedWrapperCallerDump(pcs []uintptr) {
 	var h inlineWrapperInterface
+
+	// Take the address of h, so that the h.dump() call (below)
+	// does not get devirtualized by the compiler.
+	_ = &h
+
 	h = &inlineWrapper{}
 	h.dump(pcs)
 }
diff --git a/test/devirtualization.go b/test/devirtualization.go
new file mode 100644
index 00000000000000..e3319052945e00
--- /dev/null
+++ b/test/devirtualization.go
@@ -0,0 +1,1277 @@
+// errorcheck -0 -m
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+type M interface{ M() }
+
+type A interface{ A() }
+
+type C interface{ C() }
+
+type Impl struct{}
+
+func (*Impl) M() {} // ERROR "can inline \(\*Impl\).M$"
+
+func (*Impl) A() {} // ERROR "can inline \(\*Impl\).A$"
+
+type Impl2 struct{}
+
+func (*Impl2) M() {} // ERROR "can inline \(\*Impl2\).M$"
+
+func (*Impl2) A() {} // ERROR "can inline \(\*Impl2\).A$"
+
+type CImpl struct{}
+
+func (CImpl) C() {} // ERROR "can inline CImpl.C$"
+
+func typeAsserts() {
+	var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+
+	a.(M).M()     // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	a.(A).A()     // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	a.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+	a.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A"
+
+	v := a.(M)
+	v.M()         // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+	v.(A).A()     // ERROR "devirtualizing v.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	v.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A"
+	v.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+
+	v2 := a.(A)
+	v2.A()         // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	v2.(M).M()     // ERROR "devirtualizing v2.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	v2.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A"
+	v2.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+
+	a.(M).(A).A() // ERROR "devirtualizing a.\(M\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	a.(A).(M).M() // ERROR "devirtualizing a.\(A\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+
+	a.(M).(A).(*Impl).A() // ERROR "inlining call to \(\*Impl\).A"
+	a.(A).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+
+	any(a).(M).M()           // ERROR "devirtualizing any\(a\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	any(a).(A).A()           // ERROR "devirtualizing any\(a\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	any(a).(M).(any).(A).A() // ERROR "devirtualizing any\(a\).\(M\).\(any\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+
+	c := any(a)
+	c.(A).A() // ERROR "devirtualizing c.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	c.(M).M() // ERROR "devirtualizing c.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+
+	M(a).M()    // ERROR "devirtualizing M\(a\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	M(M(a)).M() // ERROR "devirtualizing M\(M\(a\)\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+
+	a2 := a.(A)
+	A(a2).A()    // ERROR "devirtualizing A\(a2\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	A(A(a2)).A() // ERROR "devirtualizing A\(A\(a2\)\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+
+	{
+		var a C = &CImpl{}   // ERROR "&CImpl{} does not escape$"
+		a.(any).(C).C()      // ERROR "devirtualizing a.\(any\).\(C\).C to \*CImpl$" "inlining call to CImpl.C"
+		a.(any).(*CImpl).C() // ERROR "inlining call to CImpl.C"
+	}
+}
+
+func typeAssertsWithOkReturn() {
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		if v, ok := a.(M); ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		if v, ok := a.(A); ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, ok := a.(M)
+		if ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, ok := a.(A)
+		if ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, ok := a.(*Impl)
+		if ok {
+			v.A() // ERROR "inlining call to \(\*Impl\).A"
+			v.M() // ERROR "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, _ := a.(M)
+		v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, _ := a.(A)
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		v, _ := a.(*Impl)
+		v.A() // ERROR "inlining call to \(\*Impl\).A"
+		v.M() // ERROR "inlining call to \(\*Impl\).M"
+	}
+	{
+		a := newM() // ERROR "&Impl{} does not escape$" "inlining call to newM"
+		callA(a)    // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA"
+		callIfA(a)  // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA"
+	}
+	{
+		_, a := newM2ret() // ERROR "&Impl{} does not escape$" "inlining call to newM2ret"
+		callA(a)           // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA"
+		callIfA(a)         // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA"
+	}
+	{
+		var a M = &Impl{} // ERROR "&Impl{} does not escape$"
+		// Note the !ok condition; devirtualizing here is fine.
+		if v, ok := a.(M); !ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var a A = newImplNoInline()
+		if v, ok := a.(M); ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var impl2InA A = &Impl2{} // ERROR "&Impl2{} does not escape$"
+		var a A
+		a, _ = impl2InA.(*Impl)
+		// a now contains the zero value of *Impl
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		a := newANoInline()
+		a.A()
+	}
+	{
+		_, a := newANoInlineRet2()
+		a.A()
+	}
+}
+
+func newM() M { // ERROR "can inline newM$"
+	return &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+func newM2ret() (int, M) { // ERROR "can inline newM2ret$"
+	return -1, &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+func callA(m M) { // ERROR "can inline callA$" "leaking param: m$"
+	m.(A).A()
+}
+
+func callIfA(m M) { // ERROR "can inline callIfA$" "leaking param: m$"
+	if v, ok := m.(A); ok {
+		v.A()
+	}
+}
+
+//go:noinline
+func newImplNoInline() *Impl {
+	return &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+//go:noinline
+func newImpl2ret2() (string, *Impl2) {
+	return "str", &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+//go:noinline
+func newImpl2() *Impl2 {
+	return &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+//go:noinline
+func newANoInline() A {
+	return &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+//go:noinline
+func newANoInlineRet2() (string, A) {
+	return "", &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+func testTypeSwitch() {
+	{
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		switch v := v.(type) {
+		case A:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case M:
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		switch v := v.(type) {
+		case A:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case M:
+			v.M()       // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+			v = &Impl{} // ERROR "&Impl{} does not escape$"
+			v.M()       // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+		v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		switch v1 := v.(type) {
+		case A:
+			v1.A()
+		case M:
+			v1.M()
+			v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		}
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		switch v := v.(type) {
+		case A:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case M:
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		case C:
+			v.C()
+		}
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		switch v := v.(type) {
+		case M:
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		default:
+			panic("does not implement M") // ERROR ".does not implement M. escapes to heap$"
+		}
+	}
+}
+
+func differentTypeAssign() {
+	{
+		var a A
+		a = &Impl{}  // ERROR "&Impl{} escapes to heap$"
+		a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		a.A()
+	}
+	{
+		a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$"
+		a = &Impl2{}    // ERROR "&Impl2{} escapes to heap$"
+		a.A()
+	}
+	{
+		a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$"
+		a.A()
+		a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+	}
+	{
+		a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$"
+		a = &Impl2{}    // ERROR "&Impl2{} escapes to heap$"
+		var asAny any = a
+		asAny.(A).A()
+	}
+	{
+		a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$"
+		var asAny any = a
+		asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		asAny.(A).A()
+	}
+	{
+		a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$"
+		var asAny any = a
+		asAny.(A).A()
+		asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		a.A()            // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A
+		a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = newImpl2()
+		a.A()
+	}
+	{
+		var a A
+		a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, a = newImpl2ret2()
+		a.A()
+	}
+}
+
+func assignWithTypeAssert() {
+	{
+		var i1 A = &Impl{}  // ERROR "&Impl{} does not escape$"
+		var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$"
+		i1 = i2.(*Impl)     // this will panic
+		i1.A()              // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		i2.A()              // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A"
+	}
+	{
+		var i1 A = &Impl{}  // ERROR "&Impl{} does not escape$"
+		var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$"
+		i1, _ = i2.(*Impl)  // i1 is going to be nil
+		i1.A()              // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		i2.A()              // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A"
+	}
+}
+
+func nilIface() {
+	{
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		v = nil
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		v.A()             // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v = nil
+	}
+	{
+		var nilIface A
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		v.A()             // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v = nilIface
+	}
+	{
+		var nilIface A
+		var v A = &Impl{} // ERROR "&Impl{} does not escape$"
+		v = nilIface
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		v.A()       // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+	}
+	{
+		var v A
+		var v2 A = v
+		v2.A()       // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v2 = &Impl{} // ERROR "&Impl{} does not escape$"
+	}
+	{
+		var v A
+		v.A()
+	}
+	{
+		var v A
+		var v2 A = v
+		v2.A()
+	}
+	{
+		var v A
+		var v2 A
+		v2 = v
+		v2.A()
+	}
+}
+
+func longDevirtTest() {
+	var a interface {
+		M
+		A
+	} = &Impl{} // ERROR "&Impl{} does not escape$"
+
+	{
+		var b A = a
+		b.A()     // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var b M = a
+		b.M()     // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var b A = a.(M).(A)
+		b.A()     // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var b M = a.(A).(M)
+		b.M()     // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+
+	if v, ok := a.(A); ok {
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+
+	if v, ok := a.(M); ok {
+		v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+
+	{
+		var c A = a
+
+		if v, ok := c.(A); ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+
+		c = &Impl{} // ERROR "&Impl{} does not escape$"
+
+		if v, ok := c.(M); ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+		}
+
+		if v, ok := c.(interface {
+			A
+			M
+		}); ok {
+			v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M"
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+	}
+}
+
+func deferDevirt() {
+	var a A
+	defer func() { // ERROR "can inline deferDevirt.func1$" "func literal does not escape$"
+		a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	}()
+	a = &Impl{} // ERROR "&Impl{} does not escape$"
+	a.A()       // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+}
+
+func deferNoDevirt() {
+	var a A
+	defer func() { // ERROR "can inline deferNoDevirt.func1$" "func literal does not escape$"
+		a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+	}()
+	a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	a.A()
+}
+
+//go:noinline
+func closureDevirt() {
+	var a A
+	func() { // ERROR "func literal does not escape$"
+		// defer so that it does not inline.
+		defer func() {}() // ERROR "can inline closureDevirt.func1.1$" "func literal does not escape$"
+		a = &Impl{}       // ERROR "&Impl{} escapes to heap$"
+	}()
+	a = &Impl{} // ERROR "&Impl{} does not escape$"
+	a.A()       // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+}
+
+//go:noinline
+func closureNoDevirt() {
+	var a A
+	func() { // ERROR "func literal does not escape$"
+		// defer so that it does not inline.
+		defer func() {}() // ERROR "can inline closureNoDevirt.func1.1$" "func literal does not escape$"
+		a = &Impl2{}      // ERROR "&Impl2{} escapes to heap$"
+	}()
+	a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	a.A()
+}
+
+var global = "1"
+
+func closureDevirt2() {
+	var a A
+	a = &Impl{}   // ERROR "&Impl{} does not escape$"
+	c := func() { // ERROR "can inline closureDevirt2.func1$" "func literal does not escape$"
+		a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	}
+	if global == "1" {
+		c = func() { // ERROR "can inline closureDevirt2.func2$" "func literal does not escape$"
+			a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		}
+	}
+	a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	c()
+}
+
+func closureNoDevirt2() {
+	var a A
+	a = &Impl{}   // ERROR "&Impl{} escapes to heap$"
+	c := func() { // ERROR "can inline closureNoDevirt2.func1$" "func literal does not escape$"
+		a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+	}
+	if global == "1" {
+		c = func() { // ERROR "can inline closureNoDevirt2.func2$" "func literal does not escape$"
+			a = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		}
+	}
+	a.A()
+	c()
+}
+
+//go:noinline
+func closureDevirt3() {
+	var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+	func() {          // ERROR "func literal does not escape$"
+		// defer so that it does not inline.
+		defer func() {}() // ERROR "can inline closureDevirt3.func1.1$" "func literal does not escape$"
+		a.A()             // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}()
+	func() { // ERROR "can inline closureDevirt3.func2$"
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}() // ERROR "inlining call to closureDevirt3.func2" "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+}
+
+//go:noinline
+func closureNoDevirt3() {
+	var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	func() {          // ERROR "func literal does not escape$"
+		// defer so that it does not inline.
+		defer func() {}() // ERROR "can inline closureNoDevirt3.func1.1$" "func literal does not escape$"
+		a.A()
+	}()
+	func() { // ERROR "can inline closureNoDevirt3.func2$"
+		a.A()
+	}() // ERROR "inlining call to closureNoDevirt3.func2"
+	a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+//go:noinline
+func varDeclaredInClosureReferencesOuter() {
+	var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+	func() {          // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func1.1$" "func literal does not escape$"
+		var v A = a
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}()
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func2.1$" "func literal does not escape$"
+		var v A = a
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		v.A()       // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}()
+
+	var b A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	func() {          // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func3.1$" "func literal does not escape$"
+		var v A = b
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}()
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func4.1$" "func literal does not escape$"
+		var v A = b
+		v.A()
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+	}()
+}
+
+//go:noinline
+func testNamedReturn0() (v A) {
+	v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	v.A()
+	return
+}
+
+//go:noinline
+func testNamedReturn1() (v A) {
+	v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	v.A()
+	return &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+func testNamedReturns3() (v A) {
+	v = &Impl{}    // ERROR "&Impl{} escapes to heap$"
+	defer func() { // ERROR "can inline testNamedReturns3.func1$" "func literal does not escape$"
+		v.A()
+	}()
+	v.A()
+	return &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+var (
+	globalImpl    = &Impl{}
+	globalImpl2   = &Impl2{}
+	globalA     A = &Impl{}
+	globalM     M = &Impl{}
+)
+
+func globals() {
+	{
+		globalA.A()
+		globalA.(M).M()
+		globalM.M()
+		globalM.(A).A()
+
+		a := globalA
+		a.A()
+		a.(M).M()
+
+		m := globalM
+		m.M()
+		m.(A).A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = globalImpl
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = A(globalImpl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = M(globalImpl).(A)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = globalA.(*Impl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a = globalM.(*Impl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalImpl2
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalA
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalM.(A)
+		a.A()
+	}
+}
+
+func mapsDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = m[0]
+		v.A()     // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		v, _ = m[0]
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
+func mapsNoDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = m[0]
+		v.A()
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.(M).M()
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A()
+		}
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = &Impl{}        // ERROR "&Impl{} escapes to heap$"
+		v, _ = m[0]
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{}    // ERROR "&Impl{} escapes to heap$"
+		v = m[0]
+		v.A()
+	}
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{}    // ERROR "&Impl{} escapes to heap$"
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{}    // ERROR "&Impl{} escapes to heap$"
+		v, _ = m[0]
+		v.A()
+	}
+}
+
+func chanDevirt() {
+	{
+		m := make(chan *Impl)
+		var v A = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v, _ = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		select {
+		case <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case v = <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case v, ok = <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+	}
+}
+
+func chanNoDevirt() {
+	{
+		m := make(chan *Impl)
+		var v A = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v, _ = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		select {
+		case v = <-m:
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		select {
+		case v, _ = <-m:
+			v.A()
+		}
+		v.A()
+	}
+
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v = <-m
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v, _ = <-m
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case v = <-m:
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case v, _ = <-m:
+			v.A()
+		}
+		v.A()
+	}
+}
+
+func rangeDevirt() {
+	{
+		var v A
+		m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$"
+		v = &Impl{}                   // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		v = &Impl{}                // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		v = &Impl{}                // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(chan *Impl)
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$"
+		v = &Impl{}    // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		v = &Impl{}     // ERROR "&Impl{} does not escape$"
+		impl := &Impl{} // ERROR "&Impl{} does not escape$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		v = &Impl{}     // ERROR "&Impl{} does not escape$"
+		impl := &Impl{} // ERROR "&Impl{} does not escape$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$"
+		v = &Impl{}            // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$"
+		v = &Impl{}            // ERROR "&Impl{} does not escape$"
+		for _, v = range &m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
+func rangeNoDevirt() {
+	{
+		var v A = &Impl2{}            // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{}         // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{}         // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := make(chan *Impl)
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := []*Impl{}     // ERROR "\[\]\*Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		v = &Impl2{}    // ERROR "&Impl2{} escapes to heap$"
+		impl := &Impl{} // ERROR "&Impl{} escapes to heap$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A()
+	}
+	{
+		var v A
+		v = &Impl2{}    // ERROR "&Impl2{} escapes to heap$"
+		impl := &Impl{} // ERROR "&Impl{} escapes to heap$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl2{}           // ERROR "&Impl2{} escapes to heap$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl2{}           // ERROR "&Impl2{} escapes to heap$"
+		for _, v = range &m {
+		}
+		v.A()
+	}
+
+	{
+		var v A = &Impl{}         // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]struct{}) // ERROR "make\(map\[A\]struct {}\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{}  // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{}  // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := make(chan A)
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := []A{}        // ERROR "\[\]A{} does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+
+	{
+		var v A
+		m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl{}        // ERROR "&Impl{} escapes to heap$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl{}        // ERROR "&Impl{} escapes to heap$"
+		for _, v = range &m {
+		}
+		v.A()
+	}
+}
+
+var globalInt = 1
+
+func testIfInit() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		var i = &Impl{}   // ERROR "&Impl{} does not escape$"
+		if a = i; globalInt == 1 {
+			a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		a.A()     // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		if a = i2; globalInt == 1 {
+			a.A()
+		}
+		a.A()
+	}
+}
+
+func testSwitchInit() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		var i = &Impl{}   // ERROR "&Impl{} does not escape$"
+		switch a = i; globalInt {
+		case 12:
+			a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		a.A()     // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		switch a = i2; globalInt {
+		case 12:
+			a.A()
+		}
+		a.A()
+	}
+}
+
+type implWrapper Impl
+
+func (implWrapper) A() {} // ERROR "can inline implWrapper.A$"
+
+//go:noinline
+func devirtWrapperType() {
+	{
+		i := &Impl{} // ERROR "&Impl{} does not escape$"
+		// This is an OCONVNOP, so we have to be careful not to devirtualize it to Impl.A.
+		var a A = (*implWrapper)(i)
+		a.A() // ERROR "devirtualizing a.A to \*implWrapper$" "inlining call to implWrapper.A"
+	}
+	{
+		i := Impl{}
+		// This is an OCONVNOP, so we have to be careful not to devirtualize it to Impl.A.
+		var a A = (implWrapper)(i) // ERROR "implWrapper\(i\) does not escape$"
+		a.A()                      // ERROR "devirtualizing a.A to implWrapper$" "inlining call to implWrapper.A"
+	}
+}
+
+func selfAssigns() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = a
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		asAny = asAny
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		a = asAny.(A)
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(A).A()     // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		b := a
+		b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		asAny = asAny
+		a = asAny.(A)
+		asAny = a
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		asAny.(M).M() // ERROR "devirtualizing asAny.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny A = a
+		a = asAny.(A)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a, b, c A
+		c = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = c
+		c = b
+		b = c
+		a = b
+		b = a
+		c = a
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
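+// boolNoDevirt checks that the second (bool) result of a comma-ok
+// assignment is what ends up in v, so v's concrete type is no longer
+// *Impl and none of the calls below are devirtualized.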
+func boolNoDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v any = &Impl{}      // ERROR "&Impl{} escapes to heap$"
+		_, v = m[0]              // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+	{
+		m := make(chan *Impl)
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case _, v = <-m: // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		}
+		v.(A).A()
+	}
+	{
+		m := make(chan *Impl)
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, v = <-m          // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+	{
+		var a any = 4       // ERROR "4 does not escape$"
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, v = a.(int)      // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+}
+
+func addrTaken() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		a.A()
+		_ = ptrA
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		*ptrA = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		*ptrA = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		a.A()
+	}
+}
+
+func testInvalidAsserts() {
+	any(0).(interface{ A() }).A() // ERROR "any\(0\) escapes to heap$"
+	{
+		var a M = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a.(C).C()         // this will panic
+		a.(any).(C).C()   // this will panic
+	}
+	{
+		var a C = &CImpl{} // ERROR "&CImpl{} escapes to heap$"
+		a.(M).M()          // this will panic
+		a.(any).(M).M()    // this will panic
+	}
+	{
+		var a C = &CImpl{} // ERROR "&CImpl{} does not escape$"
+
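+		// Even though the (M) asserts below fail at runtime (a holds a
+		// *CImpl), the final assertion to the concrete type *Impl lets the
+		// compiler inline (*Impl).M without devirtualization.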
+		// this will panic
+		a.(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+
+		// this will panic
+		a.(any).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+	}
+}
+
+type namedBool bool
+
+func (namedBool) M() {} // ERROR "can inline namedBool.M$"
+
+//go:noinline
+func namedBoolTest() {
+	m := map[int]int{} // ERROR "map\[int\]int{} does not escape"
+	var ok namedBool
+	_, ok = m[5]
+	var i M = ok // ERROR "ok does not escape"
+	i.M()        // ERROR "devirtualizing i.M to namedBool$" "inlining call to namedBool.M"
+}
diff --git a/test/devirtualization_nil_panics.go b/test/devirtualization_nil_panics.go
new file mode 100644
index 00000000000000..59da454be7f910
--- /dev/null
+++ b/test/devirtualization_nil_panics.go
@@ -0,0 +1,100 @@
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+type A interface{ A() }
+
+type Impl struct{}
+
+func (*Impl) A() {}
+
+type Impl2 struct{}
+
+func (*Impl2) A() {}
+
+func main() {
+	shouldNilPanic(28, func() {
+		var v A
+		v.A()
+		v = &Impl{}
+	})
+	shouldNilPanic(36, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+		}()
+		v.A()
+	})
+	shouldNilPanic(43, func() {
+		var v A
+		f := func() {
+			v = &Impl{}
+		}
+		v.A()
+		f()
+	})
+
+	// Make sure that both the devirtualized and the non-devirtualized
+	// variants panic on the same line.
+	shouldNilPanic(55, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+		}()
+		v. // A() is on a separate line
+			A()
+	})
+	shouldNilPanic(64, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+			v = &Impl2{} // assign a different type, so that the call below is not devirtualized
+		}()
+		v. // A() is on a separate line
+			A()
+	})
+}
+
+var cnt = 0
+
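+// shouldNilPanic runs f and checks that it panics with a nil-pointer
+// dereference raised at line wantLine of this file; any other outcome
+// (no panic, a different panic, or a different line) panics in turn.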
+func shouldNilPanic(wantLine int, f func()) {
+	cnt++
+	defer func() {
+		p := recover()
+		if p == nil {
+			panic("no nil deref panic")
+		}
+		if strings.Contains(fmt.Sprintf("%s", p), "invalid memory address or nil pointer dereference") {
+			callers := make([]uintptr, 128)
+			n := runtime.Callers(0, callers)
+			callers = callers[:n]
+
+			frames := runtime.CallersFrames(callers)
+			line := -1
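+			// Walk the stack to find the frame of the closure passed to
+			// shouldNilPanic (main.main.funcN) and record the line at
+			// which it panicked.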
+			for {
+				f, more := frames.Next()
+				if f.Function == fmt.Sprintf("main.main.func%v", cnt) {
+					line = f.Line
+					break
+				}
+				if !more {
+					break
+				}
+			}
+
+			if line != wantLine {
+				panic(fmt.Sprintf("invalid line number in panic = %v; want = %v", line, wantLine))
+			}
+
+			return
+		}
+		panic(p)
+	}()
+	f()
+}
diff --git a/test/devirtualization_with_type_assertions_interleaved.go b/test/devirtualization_with_type_assertions_interleaved.go
new file mode 100644
index 00000000000000..9cf7d995845936
--- /dev/null
+++ b/test/devirtualization_with_type_assertions_interleaved.go
@@ -0,0 +1,107 @@
+// errorcheck -0 -m
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+type hashIface interface {
+	Sum() []byte
+}
+
+type clonableHashIface interface {
+	Sum() []byte
+	Clone() hashIface
+}
+
+type hash struct{ state [32]byte }
+
+func (h *hash) Sum() []byte { // ERROR "can inline \(\*hash\).Sum$" "h does not escape$"
+	return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$"
+}
+
+func (h *hash) Clone() hashIface { // ERROR "can inline \(\*hash\).Clone$" "h does not escape$"
+	c := *h // ERROR "moved to heap: c$"
+	return &c
+}
+
+type hash2 struct{ state [32]byte }
+
+func (h *hash2) Sum() []byte { // ERROR "can inline \(\*hash2\).Sum$" "h does not escape$"
+	return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$"
+}
+
+func (h *hash2) Clone() hashIface { // ERROR "can inline \(\*hash2\).Clone$" "h does not escape$"
+	c := *h // ERROR "moved to heap: c$"
+	return &c
+}
+
+func newHash() hashIface { // ERROR "can inline newHash$"
+	return &hash{} // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHash1(h hashIface) hashIface { // ERROR "can inline cloneHash1$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone()
+	}
+	return &hash{} // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHash2(h hashIface) hashIface { // ERROR "can inline cloneHash2$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone()
+	}
+	return nil
+}
+
+func cloneHash3(h hashIface) hashIface { // ERROR "can inline cloneHash3$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone()
+	}
+	return &hash2{} // ERROR "&hash2{} escapes to heap$"
+}
+
+func cloneHashWithBool1(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool1$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone(), true
+	}
+	return &hash{}, false // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHashWithBool2(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool2$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone(), true
+	}
+	return nil, false
+}
+
+func cloneHashWithBool3(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool3$" "leaking param: h$"
+	if h, ok := h.(clonableHashIface); ok {
+		return h.Clone(), true
+	}
+	return &hash2{}, false // ERROR "&hash2{} escapes to heap$"
+}
+
+func interleavedWithTypeAssertions() {
+	h1 := newHash() // ERROR "&hash{} does not escape$" "inlining call to newHash"
+	_ = h1.Sum()    // ERROR "devirtualizing h1.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h2 := cloneHash1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash1"
+	_ = h2.Sum()         // ERROR "devirtualizing h2.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h3 := cloneHash2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash2"
+	_ = h3.Sum()         // ERROR "devirtualizing h3.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
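+	// cloneHash3 can return either the *hash produced by Clone or a fresh
+	// *hash2, so h4's concrete type is ambiguous: h4.Sum is not
+	// devirtualized and the &hash2{} escapes. h7 below behaves the same way.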
+	h4 := cloneHash3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash3" "moved to heap: c$"
+	_ = h4.Sum()
+
+	h5, _ := cloneHashWithBool1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool1"
+	_ = h5.Sum()                    // ERROR "devirtualizing h5.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h6, _ := cloneHashWithBool2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool2"
+	_ = h6.Sum()                    // ERROR "devirtualizing h6.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h7, _ := cloneHashWithBool3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool3" "moved to heap: c$"
+	_ = h7.Sum()
+}
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index ccf54fad54a03c..e55f190d7ee571 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -22,9 +22,8 @@ func g() {
 	h := E() // ERROR "inlining call to E" "T\(0\) does not escape"
 	h.M()    // ERROR "devirtualizing h.M to T" "inlining call to T.M"
 
-	// BAD: T(0) could be stack allocated.
-	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap"
+	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) does not escape"
 
-	// Testing that we do NOT devirtualize here:
-	i.M()
+	// It is fine to devirtualize here, as the compiler inserts an additional nilcheck.
+	i.M() // ERROR "devirtualizing i.M to T" "inlining call to T.M"
 }
diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go
index 559de59184460a..4a0b7cea102e88 100644
--- a/test/fixedbugs/issue42284.dir/b.go
+++ b/test/fixedbugs/issue42284.dir/b.go
@@ -10,9 +10,8 @@ func g() {
 	h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
 	h.M()      // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M"
 
-	// BAD: T(0) could be stack allocated.
-	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap"
+	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) does not escape"
 
-	// Testing that we do NOT devirtualize here:
-	i.M()
+	// It is fine to devirtualize here, as the compiler inserts an additional nilcheck.
+	i.M() // ERROR "devirtualizing i.M to a.T" "inlining call to a.T.M"
 }