diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl
index a057a1879412c..e53e55fd05ce1 100644
--- a/base/compiler/abstractinterpretation.jl
+++ b/base/compiler/abstractinterpretation.jl
@@ -568,17 +568,27 @@ end
 function maybe_get_const_prop_profitable(interp::AbstractInterpreter, result::MethodCallResult,
                                          @nospecialize(f), argtypes::Vector{Any}, match::MethodMatch,
                                          sv::InferenceState)
-    const_prop_entry_heuristic(interp, result, sv) || return nothing
+    if !InferenceParams(interp).ipo_constant_propagation
+        add_remark!(interp, sv, "[constprop] Disabled by parameter")
+        return nothing
+    end
     method = match.method
+    force = force_const_prop(interp, f, method)
+    force || const_prop_entry_heuristic(interp, result, sv) || return nothing
     nargs::Int = method.nargs
     method.isva && (nargs -= 1)
-    if length(argtypes) < nargs
+    length(argtypes) < nargs && return nothing
+    if !(const_prop_argument_heuristic(interp, argtypes) || const_prop_rettype_heuristic(interp, result.rt))
+        add_remark!(interp, sv, "[constprop] Disabled by argument and rettype heuristics")
         return nothing
     end
-    const_prop_argument_heuristic(interp, argtypes) || const_prop_rettype_heuristic(interp, result.rt) || return nothing
     allconst = is_allconst(argtypes)
-    force = force_const_prop(interp, f, method)
-    force || const_prop_function_heuristic(interp, f, argtypes, nargs, allconst) || return nothing
+    if !force
+        if !const_prop_function_heuristic(interp, f, argtypes, nargs, allconst)
+            add_remark!(interp, sv, "[constprop] Disabled by function heuristic")
+            return nothing
+        end
+    end
     force |= allconst
     mi = specialize_method(match, !force)
     if mi === nothing
@@ -594,8 +604,13 @@ function maybe_get_const_prop_profitable(interp::AbstractInterpreter, result::Me
 end

 function const_prop_entry_heuristic(interp::AbstractInterpreter, result::MethodCallResult, sv::InferenceState)
-    call_result_unused(sv) && result.edgecycle && return false
-    return is_improvable(result.rt) && InferenceParams(interp).ipo_constant_propagation
+    if call_result_unused(sv) && result.edgecycle
+        add_remark!(interp, sv, "[constprop] Disabled by entry heuristic (edgecycle with unused result)")
+        return false
+    end
+    is_improvable(result.rt) && return true
+    add_remark!(interp, sv, "[constprop] Disabled by entry heuristic (unimprovable return type)")
+    return false
 end

 # see if propagating constants may be worthwhile
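
A hedged illustration of what these heuristics gate (the helper `first_or_nothing` below is made up for this note and is not part of the patch): when a call site's arguments are compile-time constants, re-running inference on the constant-specialized signature can narrow an otherwise wide return type, which is roughly what `const_prop_argument_heuristic` and `const_prop_rettype_heuristic` are probing for.

    first_or_nothing(t::Tuple, i::Int) = i <= length(t) ? t[i] : nothing
    # Without constant propagation this call infers as Union{Nothing, Int64, Float64};
    # with the constants propagated it can narrow to Float64 (or fold to 2.0 outright).
    code_typed(() -> first_or_nothing((1, 2.0), 2))
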
diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl
index a2e5a46787734..e9c6768bd26a5 100644
--- a/base/compiler/ssair/inlining.jl
+++ b/base/compiler/ssair/inlining.jl
@@ -35,7 +35,6 @@ end
 struct DelayedInliningSpec
     match::Union{MethodMatch, InferenceResult}
     atypes::Vector{Any}
-    stmttype::Any
 end

 struct InliningTodo
@@ -44,23 +43,32 @@ struct InliningTodo
     spec::Union{ResolvedInliningSpec, DelayedInliningSpec}
 end

-InliningTodo(mi::MethodInstance, match::MethodMatch,
-    atypes::Vector{Any}, @nospecialize(stmttype)) = InliningTodo(mi, DelayedInliningSpec(match, atypes, stmttype))
+InliningTodo(mi::MethodInstance, match::MethodMatch, atypes::Vector{Any}) =
+    InliningTodo(mi, DelayedInliningSpec(match, atypes))

-InliningTodo(result::InferenceResult, atypes::Vector{Any}, @nospecialize(stmttype)) =
-    InliningTodo(result.linfo, DelayedInliningSpec(result, atypes, stmttype))
+InliningTodo(result::InferenceResult, atypes::Vector{Any}) =
+    InliningTodo(result.linfo, DelayedInliningSpec(result, atypes))

 struct ConstantCase
     val::Any
     ConstantCase(val) = new(val)
 end

+struct InliningCase
+    sig  # ::Type
+    item # Union{InliningTodo, MethodInstance, ConstantCase}
+    function InliningCase(@nospecialize(sig), @nospecialize(item))
+        @assert isa(item, Union{InliningTodo, MethodInstance, ConstantCase}) "invalid inlining item"
+        return new(sig, item)
+    end
+end
+
 struct UnionSplit
     fully_covered::Bool
     atype # ::Type
-    cases::Vector{Pair{Any, Any}}
+    cases::Vector{InliningCase}
     bbs::Vector{Int}
-    UnionSplit(fully_covered::Bool, atype, cases::Vector{Pair{Any, Any}}) =
+    UnionSplit(fully_covered::Bool, atype, cases::Vector{InliningCase}) =
         new(fully_covered, atype, cases, Int[])
 end

@@ -137,14 +145,13 @@ function cfg_inline_item!(ir::IRCode, idx::Int, spec::ResolvedInliningSpec, stat
         need_split = true #!(idx == last_block_idx)
     end

-    if !need_split
-        delete!(state.merged_orig_blocks, last(new_range))
-    end
+    need_split || delete!(state.merged_orig_blocks, last(new_range))

     push!(state.todo_bbs, (length(state.new_cfg_blocks) - 1 + (need_split_before ? 1 : 0), post_bb_id))

     from_unionsplit || delete!(state.split_targets, length(state.new_cfg_blocks))
-    orig_succs = copy(state.new_cfg_blocks[end].succs)
+    local orig_succs
+    need_split && (orig_succs = copy(state.new_cfg_blocks[end].succs))
     empty!(state.new_cfg_blocks[end].succs)
     if need_split_before
         l = length(state.new_cfg_blocks)
@@ -204,53 +211,51 @@ function cfg_inline_item!(ir::IRCode, idx::Int, spec::ResolvedInliningSpec, stat
             end
         end
     end
+    any_edges || push!(state.dead_blocks, post_bb_id)

-    if !any_edges
-        push!(state.dead_blocks, post_bb_id)
-    end
+    return nothing
 end

-function cfg_inline_unionsplit!(ir::IRCode, idx::Int, item::UnionSplit, state::CFGInliningState)
-    block = block_for_inst(ir, idx)
-    inline_into_block!(state, block)
+function cfg_inline_unionsplit!(ir::IRCode, idx::Int,
+                                (; fully_covered, #=atype,=# cases, bbs)::UnionSplit,
+                                state::CFGInliningState)
+    inline_into_block!(state, block_for_inst(ir, idx))

     from_bbs = Int[]
     delete!(state.split_targets, length(state.new_cfg_blocks))
     orig_succs = copy(state.new_cfg_blocks[end].succs)
     empty!(state.new_cfg_blocks[end].succs)
-    for (i, (_, case)) in enumerate(item.cases)
+    for i in 1:length(cases)
         # The condition gets sunk into the previous block
         # Add a block for the union-split body
         push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx)))
         cond_bb = length(state.new_cfg_blocks)-1
         push!(state.new_cfg_blocks[end].preds, cond_bb)
         push!(state.new_cfg_blocks[cond_bb].succs, cond_bb+1)
+        case = cases[i].item
         if isa(case, InliningTodo)
             spec = case.spec::ResolvedInliningSpec
             if !spec.linear_inline_eligible
                 cfg_inline_item!(ir, idx, spec, state, true)
             end
         end
-        bb = length(state.new_cfg_blocks)
-        push!(from_bbs, bb)
+        push!(from_bbs, length(state.new_cfg_blocks))
        # TODO: Right now we unconditionally generate a fallback block
        # in case of subtyping errors - This is probably unnecessary.
-        if true # i != length(item.cases) || !item.fully_covered
+        if true # i != length(cases) || !fully_covered
            # This block will have the next condition or the final else case
            push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx)))
            push!(state.new_cfg_blocks[cond_bb].succs, length(state.new_cfg_blocks))
            push!(state.new_cfg_blocks[end].preds, cond_bb)
-           push!(item.bbs, length(state.new_cfg_blocks))
+           push!(bbs, length(state.new_cfg_blocks))
        end
     end
     # The edge from the fallback block.
-    if !item.fully_covered
-        push!(from_bbs, length(state.new_cfg_blocks))
-    end
+    fully_covered || push!(from_bbs, length(state.new_cfg_blocks))
     # This block will be the block everyone returns to
     push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx), from_bbs, orig_succs))
     join_bb = length(state.new_cfg_blocks)
     push!(state.split_targets, join_bb)
-    push!(item.bbs, join_bb)
+    push!(bbs, join_bb)
     for bb in from_bbs
         push!(state.new_cfg_blocks[bb].succs, join_bb)
     end
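
The bookkeeping above is easier to follow with a concrete call site. A minimal sketch (the functions `h` and `caller` are hypothetical, not taken from this patch): when the argument type is a small `Union`, the optimizer emits one branch per concrete signature, and each branch corresponds to an `InliningCase(sig, item)` collected into a `UnionSplit`.

    h(x::Int) = x + 1
    h(::Nothing) = 0
    caller(x::Union{Int,Nothing}) = h(x)  # two cases: Tuple{typeof(h),Int} and Tuple{typeof(h),Nothing}
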
@@ -258,8 +263,10 @@ end

 function finish_cfg_inline!(state::CFGInliningState)
     new_range = (state.first_bb + 1):length(state.cfg.blocks)
-    l = length(state.new_cfg_blocks)
-    state.bb_rename[new_range] = (l+1:l+length(new_range))
+    state.bb_rename[new_range] = let
+        l = length(state.new_cfg_blocks)
+        l+1:l+length(new_range)
+    end
     append!(state.new_cfg_blocks, state.cfg.blocks[new_range])

     # Rename edges original bbs
@@ -307,7 +314,6 @@ function ir_inline_item!(compact::IncrementalCompact, idx::Int, argexprs::Vector
     sparam_vals = item.mi.sparam_vals
     def = item.mi.def::Method
     inline_cfg = spec.ir.cfg
-    stmt = compact.result[idx][:inst]
     linetable_offset::Int32 = length(linetable)
     # Append the linetable of the inlined function to our line table
     inlined_at = Int(compact.result[idx][:line])
@@ -339,8 +345,7 @@ function ir_inline_item!(compact::IncrementalCompact, idx::Int, argexprs::Vector
         vararg = mk_tuplecall!(compact, argexprs[nargs_def:end], topline)
         argexprs = Any[argexprs[1:(nargs_def - 1)]..., vararg]
     end
-    is_opaque = def.is_for_opaque_closure
-    if is_opaque
+    if def.is_for_opaque_closure
         # Replace the first argument by a load of the capture environment
         argexprs[1] = insert_node_here!(compact,
             NewInstruction(Expr(:call, GlobalRef(Core, :getfield), argexprs[1], QuoteNode(:captures)),
@@ -358,7 +363,6 @@ function ir_inline_item!(compact::IncrementalCompact, idx::Int, argexprs::Vector
     local return_value
     # Special case inlining that maintains the current basic block if there's only one BB in the target
     if spec.linear_inline_eligible
-        terminator = spec.ir[SSAValue(last(inline_cfg.blocks[1].stmts))]
         #compact[idx] = nothing
         inline_compact = IncrementalCompact(compact, spec.ir, compact.result_idx)
         for ((_, idx′), stmt′) in inline_compact
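
A short aside on syntax: the `(; fully_covered, #=atype,=# cases, bbs)::UnionSplit` parameters used throughout this patch rely on property destructuring in function arguments, available since Julia 1.7. A self-contained sketch with a made-up `Point` type:

    struct Point
        x::Int
        y::Int
    end
    norm2((; x, y)::Point) = x^2 + y^2  # binds the fields by name, like `(; x, y) = p`
    norm2(Point(3, 4))                  # == 25
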
@@ -449,16 +453,18 @@ const fatal_type_bound_error = ErrorException("fatal error in type inference (ty

 function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int,
                                argexprs::Vector{Any}, linetable::Vector{LineInfoNode},
-                               item::UnionSplit, boundscheck::Symbol, todo_bbs::Vector{Tuple{Int, Int}})
+                               (; fully_covered, atype, cases, bbs)::UnionSplit,
+                               boundscheck::Symbol, todo_bbs::Vector{Tuple{Int, Int}})
     stmt, typ, line = compact.result[idx][:inst], compact.result[idx][:type], compact.result[idx][:line]
-    atype = item.atype
-    generic_bb = item.bbs[end-1]
-    join_bb = item.bbs[end]
-    bb = compact.active_result_bb
+    join_bb = bbs[end]
     pn = PhiNode()
-    has_generic = false
-    @assert length(item.bbs) > length(item.cases)
-    for ((metharg, case), next_cond_bb) in zip(item.cases, item.bbs)
+    local bb = compact.active_result_bb
+    @assert length(bbs) > length(cases)
+    for i in 1:length(cases)
+        ithcase = cases[i]
+        metharg = ithcase.sig
+        case = ithcase.item
+        next_cond_bb = bbs[i]
         @assert !isa(metharg, UnionAll)
         cond = true
         @assert length(atype.parameters) == length(metharg.parameters)
@@ -513,7 +519,7 @@ function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int,
     end
     bb += 1
     # We're now in the fall through block, decide what to do
-    if item.fully_covered
+    if fully_covered
         e = Expr(:call, GlobalRef(Core, :throw), fatal_type_bound_error)
         insert_node_here!(compact, NewInstruction(e, Union{}, line))
         insert_node_here!(compact, NewInstruction(ReturnNode(), Union{}, line))
@@ -672,8 +678,9 @@ function rewrite_apply_exprargs!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::
                 info = call.info
                 handled = false
                 if isa(info, ConstCallInfo)
-                    if maybe_handle_const_call!(ir, state1.id, new_stmt, info, new_sig,
-                        call.rt, istate, false, todo)
+                    if maybe_handle_const_call!(
+                        ir, state1.id, new_stmt, info, new_sig,
+                        istate, false, todo)
                         handled = true
                     else
                         info = info.call
@@ -683,8 +690,9 @@ function rewrite_apply_exprargs!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::
                     info = isa(info, MethodMatchInfo) ? MethodMatchInfo[info] : info.matches
                     # See if we can inline this call to `iterate`
-                    analyze_single_call!(ir, todo, state1.id, new_stmt,
-                        new_sig, call.rt, info, istate)
+                    analyze_single_call!(
+                        ir, todo, state1.id, new_stmt,
+                        new_sig, info, istate)
                 end
                 if i != length(thisarginfo.each)
                     valT = getfield_tfunc(call.rt, Const(1))
@@ -704,11 +712,13 @@ function rewrite_apply_exprargs!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::
     return new_argexprs, new_atypes
 end

-function rewrite_invoke_exprargs!(argexprs::Vector{Any})
+function rewrite_invoke_exprargs!(expr::Expr)
+    argexprs = expr.args
     argexpr0 = argexprs[2]
-    argexprs = argexprs[4:end]
-    pushfirst!(argexprs, argexpr0)
-    return argexprs
+    argexprs = argexprs[3:end]
+    argexprs[1] = argexpr0
+    expr.args = argexprs
+    return expr
 end

 function singleton_type(@nospecialize(ft))
@@ -785,17 +795,15 @@ function resolve_todo(todo::InliningTodo, state::InliningState)
     return InliningTodo(todo.mi, src)
 end

-function resolve_todo(todo::UnionSplit, state::InliningState)
-    UnionSplit(todo.fully_covered, todo.atype,
-        Pair{Any,Any}[sig=>resolve_todo(item, state) for (sig, item) in todo.cases])
-end
-
-function resolve_todo!(todo::Vector{Pair{Int, Any}}, state::InliningState)
-    for i = 1:length(todo)
-        idx, item = todo[i]
-        todo[i] = idx=>resolve_todo(item, state)
+function resolve_todo((; fully_covered, atype, cases, #=bbs=#)::UnionSplit, state::InliningState)
+    ncases = length(cases)
+    newcases = Vector{InliningCase}(undef, ncases)
+    for i in 1:ncases
+        (; sig, item) = cases[i]
+        newitem = resolve_todo(item, state)
+        push!(newcases, InliningCase(sig, newitem))
     end
-    todo
+    return UnionSplit(fully_covered, atype, newcases)
 end

 function validate_sparams(sparams::SimpleVector)
@@ -806,7 +814,7 @@ end

 function analyze_method!(match::MethodMatch, atypes::Vector{Any},
-                         state::InliningState, @nospecialize(stmttyp))
+                         state::InliningState)
     method = match.method
     methsig = method.sig

@@ -836,7 +844,7 @@ function analyze_method!(match::MethodMatch, atypes::Vector{Any},
         return compileable_specialization(state.et, match)
     end

-    todo = InliningTodo(mi, match, atypes, stmttyp)
+    todo = InliningTodo(mi, match, atypes)
     # If we don't have caches here, delay resolving this MethodInstance
     # until the batch inlining step (or an external post-processing pass)
     state.mi_cache === nothing && return todo
@@ -906,17 +914,13 @@ function handle_single_case!(ir::IRCode, stmt::Expr, idx::Int, @nospecialize(cas
     if isa(case, ConstantCase)
         ir[SSAValue(idx)] = case.val
     elseif isa(case, MethodInstance)
-        if isinvoke
-            stmt.args = rewrite_invoke_exprargs!(stmt.args)
-        end
+        isinvoke && rewrite_invoke_exprargs!(stmt)
         stmt.head = :invoke
         pushfirst!(stmt.args, case)
     elseif case === nothing
         # Do, well, nothing
     else
-        if isinvoke
-            stmt.args = rewrite_invoke_exprargs!(stmt.args)
-        end
+        isinvoke && rewrite_invoke_exprargs!(stmt)
         push!(todo, idx=>(case::InliningTodo))
     end
     nothing
 end
@@ -1068,7 +1072,6 @@ is_builtin(s::Signature) =

 function inline_invoke!(ir::IRCode, idx::Int, sig::Signature, (; match, result)::InvokeCallInfo,
         state::InliningState, todo::Vector{Pair{Int, Any}})
     stmt = ir.stmts[idx][:inst]
-    calltype = ir.stmts[idx][:type]

     if !match.fully_covers
         # TODO: We could union split out the signature check and continue on
@@ -1081,7 +1084,7 @@ function inline_invoke!(ir::IRCode, idx::Int, sig::Signature, (; match, result):
     pushfirst!(atypes, atype0)

     if isa(result, InferenceResult)
-        (; mi) = item = InliningTodo(result, atypes, calltype)
+        (; mi) = item = InliningTodo(result, atypes)
         validate_sparams(mi.sparam_vals) || return nothing
         if argtypes_to_type(atypes) <: mi.def.sig
             state.mi_cache !== nothing && (item = resolve_todo(item, state))
@@ -1090,7 +1093,7 @@ function inline_invoke!(ir::IRCode, idx::Int, sig::Signature, (; match, result):
         end
     end

-    result = analyze_method!(match, atypes, state, calltype)
+    result = analyze_method!(match, atypes, state)
     handle_single_case!(ir, stmt, idx, result, true, todo)
     return nothing
 end
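
For context on the call sites `inline_invoke!` handles: `invoke` pins the callee to an explicitly supplied signature instead of the most specific match. A small hypothetical example (the methods of `g` are not from this patch):

    g(x::Real) = "real method"
    g(x::Int)  = "int method"
    invoke(g, Tuple{Real}, 1)  # "real method", even though 1 isa Int
    # Statically-resolved calls like this are what `inline_invoke!` tries to inline
    # or rewrite into an `Expr(:invoke, ...)` statement.
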
@@ -1199,13 +1202,13 @@ function process_simple!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, sta
     return sig
 end

-function analyze_single_call!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, @nospecialize(stmt),
-        sig::Signature, @nospecialize(calltype), infos::Vector{MethodMatchInfo},
-        state::InliningState)
-    cases = Pair{Any, Any}[]
-    signature_union = Union{}
-    only_method = nothing # keep track of whether there is one matching method
-    too_many = false
+# TODO inline non-`isdispatchtuple`, union-split callsites
+function analyze_single_call!(
+    ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, @nospecialize(stmt),
+    (; atypes, atype)::Signature, infos::Vector{MethodMatchInfo}, state::InliningState)
+    cases = InliningCase[]
+    local signature_union = Bottom
+    local only_method = nothing # keep track of whether there is one matching method
     local meth
     local fully_covered = true
     for i in 1:length(infos)
@@ -1214,8 +1217,7 @@ function analyze_single_call!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int
         if meth.ambig
             # Too many applicable methods
             # Or there is a (partial?) ambiguity
-            too_many = true
-            break
+            return
         elseif length(meth) == 0
             # No applicable methods; try next union split
             continue
@@ -1235,38 +1237,36 @@ function analyze_single_call!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int
                 fully_covered = false
                 continue
             end
-            case = analyze_method!(match, sig.atypes, state, calltype)
-            if case === nothing
+            item = analyze_method!(match, atypes, state)
+            if item === nothing
                 fully_covered = false
                 continue
-            elseif _any(p->p[1] === spec_types, cases)
+            elseif _any(case->case.sig === spec_types, cases)
                 continue
             end
-            push!(cases, Pair{Any,Any}(spec_types, case))
+            push!(cases, InliningCase(spec_types, item))
         end
     end

-    too_many && return
-
-    signature_fully_covered = sig.atype <: signature_union
-    # If we're fully covered and there's only one applicable method,
-    # we inline, even if the signature is not a dispatch tuple
-    if signature_fully_covered && length(cases) == 0 && only_method isa Method
-        if length(infos) > 1
-            (metharg, methsp) = ccall(:jl_type_intersection_with_env, Any, (Any, Any),
-                sig.atype, only_method.sig)::SimpleVector
-            match = MethodMatch(metharg, methsp, only_method, true)
-        else
-            meth = meth::MethodLookupResult
-            @assert length(meth) == 1
-            match = meth[1]
+    # if the signature is fully covered and there is only one applicable method,
+    # we can try to inline it even if the signature is not a dispatch tuple
+    if atype <: signature_union
+        if length(cases) == 0 && only_method isa Method
+            if length(infos) > 1
+                (metharg, methsp) = ccall(:jl_type_intersection_with_env, Any, (Any, Any),
+                    atype, only_method.sig)::SimpleVector
+                match = MethodMatch(metharg, methsp, only_method, true)
+            else
+                meth = meth::MethodLookupResult
+                @assert length(meth) == 1
+                match = meth[1]
+            end
+            item = analyze_method!(match, atypes, state)
+            item === nothing && return
+            push!(cases, InliningCase(match.spec_types, item))
+            fully_covered = true
         end
-        fully_covered = true
-        case = analyze_method!(match, sig.atypes, state, calltype)
-        case === nothing && return
-        push!(cases, Pair{Any,Any}(match.spec_types, case))
-    end
-    if !signature_fully_covered
+    else
         fully_covered = false
     end
@@ -1274,42 +1274,82 @@ function analyze_single_call!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int
     # be able to do the inlining now (for constant cases), or push it directly
     # onto the todo list
     if fully_covered && length(cases) == 1
-        handle_single_case!(ir, stmt, idx, cases[1][2], false, todo)
-        return
+        handle_single_case!(ir, stmt, idx, cases[1].item, false, todo)
+    elseif length(cases) > 0
+        push!(todo, idx=>UnionSplit(fully_covered, atype, cases))
     end
-    length(cases) == 0 && return
-    push!(todo, idx=>UnionSplit(fully_covered, sig.atype, cases))
     return nothing
 end

-function maybe_handle_const_call!(ir::IRCode, idx::Int, stmt::Expr,
-                                  info::ConstCallInfo, sig::Signature, @nospecialize(calltype),
-                                  state::InliningState,
-                                  isinvoke::Bool, todo::Vector{Pair{Int, Any}})
-    # when multiple matches are found, bail out and later inliner will union-split this signature
-    # TODO effectively use multiple constant analysis results here
-    length(info.results) == 1 || return false
-    result = info.results[1]
-    isa(result, InferenceResult) || return false
-
-    item = InliningTodo(result, sig.atypes, calltype)
-    validate_sparams(item.mi.sparam_vals) || return true
-    mthd_sig = item.mi.def.sig
-    mistypes = item.mi.specTypes
-    state.mi_cache !== nothing && (item = resolve_todo(item, state))
-    if sig.atype <: mthd_sig
-        handle_single_case!(ir, stmt, idx, item, isinvoke, todo)
-        return true
-    else
-        item === nothing && return true
-        # Union split out the error case
-        item = UnionSplit(false, sig.atype, Pair{Any, Any}[mistypes => item])
-        if isinvoke
-            stmt.args = rewrite_invoke_exprargs!(stmt.args)
+# try to create `InliningCase`s using constant-prop'ed results
+# currently it works only when constant-prop' succeeded for all (union-split) signatures
+# TODO use any of constant-prop'ed results, and leave the other unhandled cases to later
+# TODO this function contains a lot of duplications with `analyze_single_call!`, factor them out
+function maybe_handle_const_call!(
+    ir::IRCode, idx::Int, stmt::Expr, (; results)::ConstCallInfo, (; atypes, atype)::Signature,
+    state::InliningState, isinvoke::Bool, todo::Vector{Pair{Int, Any}})
+    cases = InliningCase[] # TODO avoid this allocation for single cases ?
+    local fully_covered = true
+    local signature_union = Bottom
+    for result in results
+        isa(result, InferenceResult) || return false
+        (; mi) = item = InliningTodo(result, atypes)
+        spec_types = mi.specTypes
+        signature_union = Union{signature_union, spec_types}
+        if !isdispatchtuple(spec_types)
+            fully_covered = false
+            continue
         end
-        push!(todo, idx=>item)
-        return true
+        if !validate_sparams(mi.sparam_vals)
+            fully_covered = false
+            continue
+        end
+        state.mi_cache !== nothing && (item = resolve_todo(item, state))
+        if item === nothing
+            fully_covered = false
+            continue
+        end
+        push!(cases, InliningCase(spec_types, item))
     end
+
+    # if the signature is fully covered and there is only one applicable method,
+    # we can try to inline it even if the signature is not a dispatch tuple
+    if atype <: signature_union
+        if length(cases) == 0 && length(results) == 1
+            (; mi) = item = InliningTodo(results[1]::InferenceResult, atypes)
+            state.mi_cache !== nothing && (item = resolve_todo(item, state))
+            validate_sparams(mi.sparam_vals) || return true
+            item === nothing && return true
+            push!(cases, InliningCase(mi.specTypes, item))
+            fully_covered = true
+        end
+    else
+        fully_covered = false
+    end
+
+    # If we only have one case and that case is fully covered, we may either
+    # be able to do the inlining now (for constant cases), or push it directly
+    # onto the todo list
+    if fully_covered && length(cases) == 1
+        handle_single_case!(ir, stmt, idx, cases[1].item, isinvoke, todo)
+    elseif length(cases) > 0
+        isinvoke && rewrite_invoke_exprargs!(stmt)
+        push!(todo, idx=>UnionSplit(fully_covered, atype, cases))
+    end
+    return true
+end
+
+function handle_const_opaque_closure_call!(
+    ir::IRCode, idx::Int, stmt::Expr, (; results)::ConstCallInfo,
+    (; atypes)::Signature, state::InliningState, todo::Vector{Pair{Int, Any}})
+    @assert length(results) == 1
+    result = results[1]::InferenceResult
+    item = InliningTodo(result, atypes)
+    isdispatchtuple(item.mi.specTypes) || return
+    validate_sparams(item.mi.sparam_vals) || return
+    state.mi_cache !== nothing && (item = resolve_todo(item, state))
+    handle_single_case!(ir, stmt, idx, item, false, todo)
+    return nothing
 end

 function assemble_inline_todo!(ir::IRCode, state::InliningState)
@@ -1321,11 +1361,11 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState)
         sig === nothing && continue

         stmt = ir.stmts[idx][:inst]
-        calltype = ir.stmts[idx][:type]
         info = ir.stmts[idx][:info]

         # Check whether this call was @pure and evaluates to a constant
         if info isa MethodResultPure
+            calltype = ir.stmts[idx][:type]
             if calltype isa Const && is_inlineable_constant(calltype.val)
                 ir.stmts[idx][:inst] = quoted(calltype.val)
                 continue
@@ -1339,20 +1379,25 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState)
             continue
         end

-        # If inference arrived at this result by using constant propagation,
-        # it'll have performed a specialized analysis for just this case. Use its
-        # result.
+        # if inference arrived here with constant-prop'ed result(s),
+        # we can perform a specialized analysis for just this case
         if isa(info, ConstCallInfo)
-            if maybe_handle_const_call!(ir, idx, stmt, info, sig, calltype, state, sig.f === Core.invoke, todo)
+            if isa(info.call, OpaqueClosureCallInfo)
+                handle_const_opaque_closure_call!(
+                    ir, idx, stmt, info,
+                    sig, state, todo)
                 continue
             else
-                info = info.call
+                maybe_handle_const_call!(
+                    ir, idx, stmt, info, sig,
+                    state, sig.f === Core.invoke, todo) && continue
             end
+            info = info.call
        end

        if isa(info, OpaqueClosureCallInfo)
-            result = analyze_method!(info.match, sig.atypes, state, calltype)
-            handle_single_case!(ir, stmt, idx, result, false, todo)
+            item = analyze_method!(info.match, sig.atypes, state)
+            handle_single_case!(ir, stmt, idx, item, false, todo)
            continue
        end

@@ -1373,7 +1418,7 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState)
             continue
         end

-        analyze_single_call!(ir, todo, idx, stmt, sig, calltype, infos, state)
+        analyze_single_call!(ir, todo, idx, stmt, sig, infos, state)
     end
     todo
 end
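
One of the new paths above, `handle_const_opaque_closure_call!`, is specific to opaque closures. As a hedged aside (assuming the experimental macro available in this development cycle), such a closure can be created and called like this:

    oc = Base.Experimental.@opaque x -> x + 1
    oc(41)  # == 42; calls like this are described by `OpaqueClosureCallInfo`
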
diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl
index 475dc8b571442..0c5f49f1d9e12 100644
--- a/test/compiler/inline.jl
+++ b/test/compiler/inline.jl
@@ -392,3 +392,113 @@ let f(x) = (x...,)
     # the the original apply call is not union-split, but the inserted `iterate` call is.
     @test code_typed(f, Tuple{Union{Int64, CartesianIndex{1}, CartesianIndex{3}}})[1][2] == Tuple{Int64}
 end
+
+# check if `x` is a statically-resolved call of a function whose name is `sym`
+isinvoke(@nospecialize(x), sym::Symbol) = isinvoke(x, mi->mi.def.name===sym)
+function isinvoke(@nospecialize(x), pred)
+    if Meta.isexpr(x, :invoke)
+        return pred(x.args[1]::Core.MethodInstance)
+    end
+    return false
+end
+code_typed1(args...; kwargs...) = (first(only(code_typed(args...; kwargs...)))::Core.CodeInfo).code
+
+# https://github.com/JuliaLang/julia/issues/42754
+# inline union-split constant-prop'ed sources
+mutable struct X42754
+    # NOTE in order to confuse `fieldtype_tfunc`, we need to have at least two fields with different types
+    a::Union{Nothing, Int}
+    b::Symbol
+end
+let code = code_typed1((X42754, Union{Nothing,Int})) do x, a
+        # this `setproperty!` call would be union-split and constant-prop will happen for
+        # each signature: inlining would fail if we don't use constant-prop'ed source
+        # since the approximate inlining cost of `convert(fieldtype(X, sym), a)` would
+        # end up very high if we don't propagate `sym::Const(:a)`
+        x.a = a
+        x
+    end
+    @test all(code) do @nospecialize(x)
+        isinvoke(x, :setproperty!) && return false
+        if Meta.isexpr(x, :call)
+            f = x.args[1]
+            isa(f, GlobalRef) && f.name === :setproperty! && return false
+        end
+        return true
+    end
+end
+
+import Base: @constprop
+
+# test single, non-dispatchtuple callsite inlining
+
+@constprop :none @inline test_single_nondispatchtuple(@nospecialize(t)) =
+    isa(t, DataType) && t.name === Type.body.name
+let
+    code = code_typed1((Any,)) do x
+        test_single_nondispatchtuple(x)
+    end
+    @test all(code) do @nospecialize(x)
+        isinvoke(x, :test_single_nondispatchtuple) && return false
+        if Meta.isexpr(x, :call)
+            f = x.args[1]
+            isa(f, GlobalRef) && f.name === :test_single_nondispatchtuple && return false
+        end
+        return true
+    end
+end
+
+@constprop :aggressive @inline test_single_nondispatchtuple(c, @nospecialize(t)) =
+    c && isa(t, DataType) && t.name === Type.body.name
+let
+    code = code_typed1((Any,)) do x
+        test_single_nondispatchtuple(true, x)
+    end
+    @test all(code) do @nospecialize(x)
+        isinvoke(x, :test_single_nondispatchtuple) && return false
+        if Meta.isexpr(x, :call)
+            f = x.args[1]
+            isa(f, GlobalRef) && f.name === :test_single_nondispatchtuple && return false
+        end
+        return true
+    end
+end
+
+# force constant-prop' for `setproperty!`
+let m = Module()
+    code = @eval m begin
+        # if we don't force constant-prop', `T = fieldtype(Foo, ::Symbol)` will be union-split to
+        # `Union{Type{Any},Type{Int}}` and it will make `convert(T, nothing)` too costly
+        # and it leads to inlining failure
+        mutable struct Foo
+            val
+            _::Int
+        end
+
+        function setter(xs)
+            for x in xs
+                x.val = nothing
+            end
+        end
+
+        $code_typed1(setter, (Vector{Foo},))
+    end
+
+    @test !any(x->isinvoke(x, :setproperty!), code)
+end
+
+# validate inlining processing
+
+@constprop :none @inline validate_unionsplit_inlining(@nospecialize(t)) = throw("invalid inlining processing detected")
+@constprop :none @noinline validate_unionsplit_inlining(i::Integer) = (println(IOBuffer(), "prevent inlining"); false)
+let
+    invoke(xs) = validate_unionsplit_inlining(xs[1])
+    @test invoke(Any[10]) === false
+end
+
+@constprop :aggressive @inline validate_unionsplit_inlining(c, @nospecialize(t)) = c && throw("invalid inlining processing detected")
+@constprop :aggressive @noinline validate_unionsplit_inlining(c, i::Integer) = c && (println(IOBuffer(), "prevent inlining"); false)
+let
+    invoke(xs) = validate_unionsplit_inlining(true, xs[1])
+    @test invoke(Any[10]) === false
+end
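
A possible further use of the `isinvoke`/`code_typed1` helpers defined above (illustrative only; `callee` is a made-up function and this check is not part of the added tests): a `@noinline` callee with a concrete signature should survive optimization as a static `:invoke` statement.

    @noinline callee(x::Int) = x + 1
    let code = code_typed1(x -> callee(x), (Int,))
        @test any(x -> isinvoke(x, :callee), code)
    end
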