From be0c69957e7489423606023ad820599652a60e15 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 20 Jun 2023 13:39:35 +0100 Subject: [PATCH 1/7] compiler: remove destination type from cast builtins Resolves: #5909 --- src/AstGen.zig | 215 ++++++++++---- src/Autodoc.zig | 3 - src/BuiltinFn.zig | 28 +- src/Sema.zig | 707 ++++++++++++++++++++++++++++----------------- src/TypedValue.zig | 5 - src/Zir.zig | 44 ++- src/print_zir.zig | 34 ++- 7 files changed, 680 insertions(+), 356 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index f1acd7e3e3c2..df64d5854910 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -335,6 +335,32 @@ const ResultInfo = struct { }, } } + + /// Find the result type for a cast builtin given the result location. + /// If the location does not have a known result type, emits an error on + /// the given node. + fn resultType(rl: Loc, gz: *GenZir, node: Ast.Node.Index, builtin_name: []const u8) !Zir.Inst.Ref { + const astgen = gz.astgen; + switch (rl) { + .discard, .none, .ref, .inferred_ptr => {}, + .ty, .coerced_ty => |ty_ref| return ty_ref, + .ptr => |ptr| { + const ptr_ty = try gz.addUnNode(.typeof, ptr.inst, node); + return gz.addUnNode(.elem_type, ptr_ty, node); + }, + .block_ptr => |block_scope| { + if (block_scope.rl_ty_inst != .none) return block_scope.rl_ty_inst; + if (block_scope.break_result_info.rl == .ptr) { + const ptr_ty = try gz.addUnNode(.typeof, block_scope.break_result_info.rl.ptr.inst, node); + return gz.addUnNode(.elem_type, ptr_ty, node); + } + }, + } + + return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ + try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), + }); + } }; const Context = enum { @@ -2521,6 +2547,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .array_type, .array_type_sentinel, .elem_type_index, + .elem_type, .vector_type, .indexable_ptr_len, .anyframe_type, @@ -2662,7 +2689,6 @@ fn addEnsureResult(gz: 
*GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .int_cast, .ptr_cast, .truncate, - .align_cast, .has_decl, .has_field, .clz, @@ -7924,11 +7950,10 @@ fn bitCast( scope: *Scope, ri: ResultInfo, node: Ast.Node.Index, - lhs: Ast.Node.Index, - rhs: Ast.Node.Index, + operand_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { - const dest_type = try reachableTypeExpr(gz, scope, lhs, node); - const operand = try reachableExpr(gz, scope, .{ .rl = .none }, rhs, node); + const dest_type = try ri.rl.resultType(gz, node, "@bitCast"); + const operand = try reachableExpr(gz, scope, .{ .rl = .none }, operand_node, node); const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = operand, @@ -7936,6 +7961,116 @@ fn bitCast( return rvalue(gz, ri, result, node); } +/// Handle one or more nested pointer cast builtins: +/// * @ptrCast +/// * @alignCast +/// * @addrSpaceCast +/// * @constCast +/// * @volatileCast +/// Any sequence of such builtins is treated as a single operation. This allowed +/// for sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the +/// intermediate result type being unknown. +fn ptrCast( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + root_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + + var flags: Zir.Inst.FullPtrCastFlags = .{}; + + // Note that all pointer cast builtins have one parameter, so we only need + // to handle `builtin_call_two`. 
+ var node = root_node; + while (true) { + switch (node_tags[node]) { + .builtin_call_two, .builtin_call_two_comma => {}, + .grouped_expression => { + // Handle the chaining even with redundant parentheses + node = node_datas[node].lhs; + continue; + }, + else => break, + } + + if (node_datas[node].lhs == 0) break; // 0 args + if (node_datas[node].rhs != 0) break; // 2 args + + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + const info = BuiltinFn.list.get(builtin_name) orelse break; + if (info.param_count != 1) break; + + switch (info.tag) { + else => break, + inline .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + => |tag| { + if (@field(flags, @tagName(tag))) { + return astgen.failNode(node, "redundant {s}", .{builtin_name}); + } + @field(flags, @tagName(tag)) = true; + }, + } + + node = node_datas[node].lhs; + } + + const flags_i = @bitCast(u5, flags); + assert(flags_i != 0); + + const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true }; + if (flags_i == @bitCast(u5, ptr_only)) { + // Special case: simpler representation + return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast"); + } + + const no_result_ty_flags: Zir.Inst.FullPtrCastFlags = .{ + .const_cast = true, + .volatile_cast = true, + }; + if ((flags_i & ~@bitCast(u5, no_result_ty_flags)) == 0) { + // Result type not needed + const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node); + const operand = try expr(gz, scope, .{ .rl = .none }, node); + try emitDbgStmt(gz, cursor); + const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_i, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(root_node), + .operand = operand, + }); + return rvalue(gz, ri, result, root_node); + } + + // Full cast including result type + const need_result_type_builtin = if (flags.ptr_cast) + "@ptrCast" + else if (flags.align_cast) + "@alignCast" + else if (flags.addrspace_cast) + "@addrSpaceCast" + else + 
unreachable; + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node); + const result_type = try ri.rl.resultType(gz, root_node, need_result_type_builtin); + const operand = try expr(gz, scope, .{ .rl = .none }, node); + try emitDbgStmt(gz, cursor); + const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_i, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(root_node), + .lhs = result_type, + .rhs = operand, + }); + return rvalue(gz, ri, result, root_node); +} + fn typeOf( gz: *GenZir, scope: *Scope, @@ -8123,7 +8258,7 @@ fn builtinCall( // zig fmt: off .as => return as( gz, scope, ri, node, params[0], params[1]), - .bit_cast => return bitCast( gz, scope, ri, node, params[0], params[1]), + .bit_cast => return bitCast( gz, scope, ri, node, params[0]), .TypeOf => return typeOf( gz, scope, ri, node, params), .union_init => return unionInit(gz, scope, ri, node, params), .c_import => return cImport( gz, scope, node, params[0]), @@ -8308,14 +8443,13 @@ fn builtinCall( .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), - .int_from_float => return typeCast(gz, scope, ri, node, params[0], params[1], .int_from_float), - .float_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .float_from_int), - .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_from_int), - .enum_from_int => return typeCast(gz, scope, ri, node, params[0], params[1], .enum_from_int), - .float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast), - .int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast), - .ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast), - .truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate), + .int_from_float => return typeCast(gz, scope, ri, node, params[0], 
.int_from_float, builtin_name), + .float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name), + .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], .ptr_from_int, builtin_name), + .enum_from_int => return typeCast(gz, scope, ri, node, params[0], .enum_from_int, builtin_name), + .float_cast => return typeCast(gz, scope, ri, node, params[0], .float_cast, builtin_name), + .int_cast => return typeCast(gz, scope, ri, node, params[0], .int_cast, builtin_name), + .truncate => return typeCast(gz, scope, ri, node, params[0], .truncate, builtin_name), // zig fmt: on .Type => { @@ -8368,49 +8502,22 @@ fn builtinCall( }); return rvalue(gz, ri, result, node); }, - .align_cast => { - const dest_align = try comptimeExpr(gz, scope, align_ri, params[0]); - const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); - const result = try gz.addPlNode(.align_cast, node, Zir.Inst.Bin{ - .lhs = dest_align, - .rhs = rhs, - }); - return rvalue(gz, ri, result, node); - }, .err_set_cast => { try emitDbgNode(gz, node); const result = try gz.addExtendedPayload(.err_set_cast, Zir.Inst.BinNode{ - .lhs = try typeExpr(gz, scope, params[0]), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), + .lhs = try ri.rl.resultType(gz, node, "@errSetCast"), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[0]), .node = gz.nodeIndexToRelative(node), }); return rvalue(gz, ri, result, node); }, - .addrspace_cast => { - const result = try gz.addExtendedPayload(.addrspace_cast, Zir.Inst.BinNode{ - .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, params[0]), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), - .node = gz.nodeIndexToRelative(node), - }); - return rvalue(gz, ri, result, node); - }, - .const_cast => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.const_cast, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = 
operand, - }); - return rvalue(gz, ri, result, node); - }, - .volatile_cast => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.volatile_cast, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, + .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + => return ptrCast(gz, scope, ri, node), // zig fmt: off .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), @@ -8725,13 +8832,13 @@ fn typeCast( scope: *Scope, ri: ResultInfo, node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, + operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, + builtin_name: []const u8, ) InnerError!Zir.Inst.Ref { const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const result_type = try typeExpr(gz, scope, lhs_node); - const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); + const result_type = try ri.rl.resultType(gz, node, builtin_name); + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); try emitDbgStmt(gz, cursor); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ @@ -9432,6 +9539,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_ switch (builtin_info.needs_mem_loc) { .never => return false, .always => return true, + .forward0 => node = node_datas[node].lhs, .forward1 => node = node_datas[node].rhs, } // Missing builtin arg is not a parsing error, expect an error later. @@ -9448,6 +9556,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_ switch (builtin_info.needs_mem_loc) { .never => return false, .always => return true, + .forward0 => node = params[0], .forward1 => node = params[1], } // Missing builtin arg is not a parsing error, expect an error later. 
diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 68ddcc94c458..33c57b119792 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -1529,7 +1529,6 @@ fn walkInstruction( .int_cast, .ptr_cast, .truncate, - .align_cast, .has_decl, .has_field, .div_exact, @@ -3024,8 +3023,6 @@ fn walkInstruction( .int_from_error, .error_from_int, .reify, - .const_cast, - .volatile_cast, => { const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data; const bin_index = self.exprs.items.len; diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig index 27b963f52871..9498b8dc83c9 100644 --- a/src/BuiltinFn.zig +++ b/src/BuiltinFn.zig @@ -129,6 +129,8 @@ pub const MemLocRequirement = enum { never, /// The builtin always needs a memory location. always, + /// The builtin forwards the question to argument at index 0. + forward0, /// The builtin forwards the question to argument at index 1. forward1, }; @@ -168,14 +170,14 @@ pub const list = list: { "@addrSpaceCast", .{ .tag = .addrspace_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@alignCast", .{ .tag = .align_cast, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -226,8 +228,8 @@ pub const list = list: { "@bitCast", .{ .tag = .bit_cast, - .needs_mem_loc = .forward1, - .param_count = 2, + .needs_mem_loc = .forward0, + .param_count = 1, }, }, .{ @@ -457,7 +459,7 @@ pub const list = list: { .{ .tag = .err_set_cast, .eval_to_error = .always, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -502,14 +504,14 @@ pub const list = list: { "@floatCast", .{ .tag = .float_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@intFromFloat", .{ .tag = .int_from_float, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -572,14 +574,14 @@ pub const list = list: { "@intCast", .{ .tag = .int_cast, - .param_count = 2, + .param_count = 1, }, }, .{ "@enumFromInt", .{ .tag = .enum_from_int, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -594,14 +596,14 @@ pub const list = list: { "@floatFromInt", .{ .tag = .float_from_int, - 
.param_count = 2, + .param_count = 1, }, }, .{ "@ptrFromInt", .{ .tag = .ptr_from_int, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -685,7 +687,7 @@ pub const list = list: { "@ptrCast", .{ .tag = .ptr_cast, - .param_count = 2, + .param_count = 1, }, }, .{ @@ -938,7 +940,7 @@ pub const list = list: { "@truncate", .{ .tag = .truncate, - .param_count = 2, + .param_count = 1, }, }, .{ diff --git a/src/Sema.zig b/src/Sema.zig index b171c1bcb84b..e45cccd43bad 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -960,6 +960,7 @@ fn analyzeBodyInner( .elem_val => try sema.zirElemVal(block, inst), .elem_val_node => try sema.zirElemValNode(block, inst), .elem_type_index => try sema.zirElemTypeIndex(block, inst), + .elem_type => try sema.zirElemType(block, inst), .enum_literal => try sema.zirEnumLiteral(block, inst), .int_from_enum => try sema.zirIntFromEnum(block, inst), .enum_from_int => try sema.zirEnumFromInt(block, inst), @@ -1044,7 +1045,6 @@ fn analyzeBodyInner( .int_cast => try sema.zirIntCast(block, inst), .ptr_cast => try sema.zirPtrCast(block, inst), .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), .has_decl => try sema.zirHasDecl(block, inst), .has_field => try sema.zirHasField(block, inst), .byte_swap => try sema.zirByteSwap(block, inst), @@ -1172,13 +1172,12 @@ fn analyzeBodyInner( .reify => try sema.zirReify( block, extended, inst), .builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended), .cmpxchg => try sema.zirCmpxchg( block, extended), - .addrspace_cast => try sema.zirAddrSpaceCast( block, extended), .c_va_arg => try sema.zirCVaArg( block, extended), .c_va_copy => try sema.zirCVaCopy( block, extended), .c_va_end => try sema.zirCVaEnd( block, extended), .c_va_start => try sema.zirCVaStart( block, extended), - .const_cast, => try sema.zirConstCast( block, extended), - .volatile_cast, => try sema.zirVolatileCast( block, extended), + .ptr_cast_full => try sema.zirPtrCastFull( block, extended), + 
.ptr_cast_no_dest => try sema.zirPtrCastNoDest( block, extended), .work_item_id => try sema.zirWorkItem( block, extended, extended.opcode), .work_group_size => try sema.zirWorkItem( block, extended, extended.opcode), .work_group_id => try sema.zirWorkItem( block, extended, extended.opcode), @@ -1821,6 +1820,24 @@ pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Ins return ty; } +fn resolveCastDestType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, builtin_name: []const u8) !Type { + return sema.resolveType(block, src, zir_ref) catch |err| switch (err) { + error.GenericPoison => { + // Cast builtins use their result type as the destination type, but + // it could be an anytype argument, which we can't catch in AstGen. + const msg = msg: { + const msg = try sema.errMsg(block, src, "{s} must have a known result type", .{builtin_name}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "result type is unknown due to anytype parameter", .{}); + try sema.errNote(block, src, msg, "use @as to provide explicit result type", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, + else => |e| return e, + }; +} + fn analyzeAsType( sema: *Sema, block: *Block, @@ -7953,6 +7970,14 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } } +fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const un_node = sema.code.instructions.items(.data)[inst].un_node; + const ptr_ty = try sema.resolveType(block, .unneeded, un_node.operand); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction + return sema.addType(ptr_ty.childType(mod)); +} + fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -8278,13 +8303,12 @@ fn zirEnumFromInt(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@enumFromInt"); const operand = try sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); + return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); @@ -9572,14 +9596,14 @@ fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@intCast"); const operand = try sema.resolveInst(extra.rhs); - return sema.intCast(block, inst_data.src(), dest_ty, dest_ty_src, operand, operand_src, true); + return sema.intCast(block, inst_data.src(), dest_ty, src, operand, operand_src, true); } fn intCast( @@ -9733,11 
+9757,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@bitCast"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); switch (dest_ty.zigTypeTag(mod)) { @@ -9756,14 +9780,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), + => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9774,11 +9798,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(block, src, "cannot 
@bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), - .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9792,7 +9816,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Union => "union", else => unreachable, }; - return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ + return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ dest_ty.fmt(mod), container, }); }, @@ -9876,11 +9900,11 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@floatCast"); const operand = try sema.resolveInst(extra.rhs); const target = mod.getTarget(); @@ -9889,7 +9913,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Float => false, else => return sema.fail( block, - dest_ty_src, + src, 
"expected float type, found '{}'", .{dest_ty.fmt(mod)}, ), @@ -20552,50 +20576,6 @@ fn reifyStruct( return decl_val; } -fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; - const src = LazySrcLoc.nodeOffset(extra.node); - const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; - - const dest_addrspace = try sema.analyzeAddressSpace(block, addrspace_src, extra.lhs, .pointer); - const ptr = try sema.resolveInst(extra.rhs); - const ptr_ty = sema.typeOf(ptr); - - try sema.checkPtrOperand(block, ptr_src, ptr_ty); - - var ptr_info = ptr_ty.ptrInfo(mod); - const src_addrspace = ptr_info.flags.address_space; - if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "invalid address space cast", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(block, src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ @tagName(src_addrspace), @tagName(dest_addrspace) }); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - - ptr_info.flags.address_space = dest_addrspace; - const dest_ptr_ty = try mod.ptrType(ptr_info); - const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) - try mod.optionalType(dest_ptr_ty.toIntern()) - else - dest_ptr_ty; - - try sema.requireRuntimeBlock(block, src, ptr_src); - // TODO: Address space cast safety? 
- - return block.addInst(.{ - .tag = .addrspace_cast, - .data = .{ .ty_op = .{ - .ty = try sema.addType(dest_ty), - .operand = ptr, - } }, - }); -} - fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { const va_list_ty = try sema.getBuiltinType("VaList"); const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); @@ -20711,14 +20691,14 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@intFromFloat"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - _ = try sema.checkIntType(block, ty_src, dest_ty); + _ = try sema.checkIntType(block, src, dest_ty); try sema.checkFloatType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -20751,14 +20731,14 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = 
inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@floatFromInt"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - try sema.checkFloatType(block, ty_src, dest_ty); + try sema.checkFloatType(block, src, dest_ty); _ = try sema.checkIntType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -20779,21 +20759,20 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_res = try sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src); - const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const ptr_ty = try sema.resolveType(block, src, extra.lhs); - try sema.checkPtrType(block, type_src, ptr_ty); + const ptr_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrFromInt"); + try sema.checkPtrType(block, src, ptr_ty); const elem_ty = ptr_ty.elemType2(mod); const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { - const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, type_src, msg, "slice length cannot be inferred from 
address", .{}); + try sema.errNote(block, src, msg, "slice length cannot be inferred from address", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -20841,12 +20820,11 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@errSetCast"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - try sema.checkErrorSetType(block, dest_ty_src, dest_ty); + try sema.checkErrorSetType(block, src, dest_ty); try sema.checkErrorSetType(block, operand_src, operand_ty); // operand must be defined since it can be an invalid error value @@ -20869,7 +20847,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint true; } - try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty); + try sema.resolveInferredErrorSetTy(block, src, dest_ty); try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty); for (dest_ty.errorSetNames(mod)) |dest_err_name| { if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) @@ -20924,159 +20902,415 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat return block.addBitCast(dest_ty, operand); } +fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = 
sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; + const operand = try sema.resolveInst(extra.rhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrCast"); // TODO: better error message (builtin name) + return sema.ptrCastFull( + block, + flags, + src, + operand, + operand_src, + dest_ty, + ); +} + fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@ptrCast"); const operand = try sema.resolveInst(extra.rhs); + + return sema.ptrCastFull( + block, + .{ .ptr_cast = true }, + src, + operand, + operand_src, + dest_ty, + ); +} + +fn ptrCastFull( + sema: *Sema, + block: *Block, + flags: Zir.Inst.FullPtrCastFlags, + src: LazySrcLoc, + operand: Air.Inst.Ref, + operand_src: LazySrcLoc, + dest_ty: Type, +) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - try sema.checkPtrType(block, dest_ty_src, dest_ty); + try sema.checkPtrType(block, src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); - const operand_info = operand_ty.ptrInfo(mod); + const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - if (operand_info.flags.is_const and !dest_info.flags.is_const) { - const msg 
= msg: { - const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(block, src, msg, "consider using '@constCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } - if (operand_info.flags.is_volatile and !dest_info.flags.is_volatile) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{}); - errdefer msg.destroy(sema.gpa); + try sema.resolveTypeLayout(src_info.child.toType()); + try sema.resolveTypeLayout(dest_info.child.toType()); - try sema.errNote(block, src, msg, "consider using '@volatileCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + const src_slice_like = src_info.flags.size == .Slice or + (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array); + + const dest_slice_like = dest_info.flags.size == .Slice or + (dest_info.flags.size == .One and dest_info.child.toType().zigTypeTag(mod) == .Array); + + if (dest_info.flags.size == .Slice and !src_slice_like) { + return sema.fail(block, src, "illegal pointer cast to slice", .{}); } - if (operand_info.flags.address_space != dest_info.flags.address_space) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{}); - break :msg msg; + if (dest_info.flags.size == .Slice) { + const src_elem_size = switch (src_info.flags.size) { + .Slice => src_info.child.toType().abiSize(mod), + // pointer to array + .One => src_info.child.toType().childType(mod).abiSize(mod), + else => unreachable, }; - return sema.failWithOwnedErrorMsg(msg); + const dest_elem_size = dest_info.child.toType().abiSize(mod); + if (src_elem_size != dest_elem_size) { + return sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{}); + } } - const 
dest_is_slice = dest_ty.isSlice(mod); - const operand_is_slice = operand_ty.isSlice(mod); - if (dest_is_slice and !operand_is_slice) { - return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{}); - } - const ptr = if (operand_is_slice and !dest_is_slice) - try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty) - else - operand; + // The checking logic in this function must stay in sync with Sema.coerceInMemoryAllowedPtrs - const dest_elem_ty = dest_ty.elemType2(mod); - try sema.resolveTypeLayout(dest_elem_ty); - const dest_align = dest_ty.ptrAlignment(mod); - - const operand_elem_ty = operand_ty.elemType2(mod); - try sema.resolveTypeLayout(operand_elem_ty); - const operand_align = operand_ty.ptrAlignment(mod); - - // If the destination is less aligned than the source, preserve the source alignment - const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { - // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result - var dest_ptr_info = dest_ty.ptrInfo(mod); - dest_ptr_info.flags.alignment = Alignment.fromNonzeroByteUnits(operand_align); - if (dest_ty.zigTypeTag(mod) == .Optional) { - break :blk try mod.optionalType((try mod.ptrType(dest_ptr_info)).toIntern()); - } else { - break :blk try mod.ptrType(dest_ptr_info); + if (!flags.ptr_cast) { + check_size: { + if (src_info.flags.size == dest_info.flags.size) break :check_size; + if (src_slice_like and dest_slice_like) break :check_size; + if (src_info.flags.size == .C) break :check_size; + if (dest_info.flags.size == .C) break :check_size; + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cannot implicitly convert {s} pointer to {s} pointer", .{ + pointerSizeString(src_info.flags.size), + pointerSizeString(dest_info.flags.size), + }); + errdefer msg.destroy(sema.gpa); + if (dest_info.flags.size == .Many and + (src_info.flags.size == .Slice or + (src_info.flags.size == .One and 
src_info.child.toType().zigTypeTag(mod) == .Array))) + { + try sema.errNote(block, src, msg, "use 'ptr' field to convert slice to many pointer", .{}); + } else { + try sema.errNote(block, src, msg, "use @ptrCast to change pointer size", .{}); + } + break :msg msg; + }); + } + + check_child: { + const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: { + // *[n]T -> []T + break :blk src_info.child.toType().childType(mod); + } else src_info.child.toType(); + + const dest_child = dest_info.child.toType(); + + const imc_res = try sema.coerceInMemoryAllowed( + block, + dest_child, + src_child, + !dest_info.flags.is_const, + mod.getTarget(), + src, + operand_src, + ); + if (imc_res == .ok) break :check_child; + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer element type '{}' cannot coerce into element type '{}'", .{ + src_child.fmt(mod), + dest_child.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try imc_res.report(sema, block, src, msg); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer element type", .{}); + break :msg msg; + }); + } + + check_sent: { + if (dest_info.sentinel == .none) break :check_sent; + if (src_info.flags.size == .C) break :check_sent; + if (src_info.sentinel != .none) { + const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child); + if (dest_info.sentinel == coerced_sent) break :check_sent; + } + if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) { + // [*]nT -> []T + const arr_ty = src_info.child.toType(); + if (arr_ty.sentinel(mod)) |src_sentinel| { + const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child); + if (dest_info.sentinel == coerced_sent) break :check_sent; + } + } + return sema.failWithOwnedErrorMsg(msg: { + const msg = if (src_info.sentinel == .none) blk: { + break :blk try sema.errMsg(block, src, "destination 
pointer requires '{}' sentinel", .{ + dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod), + }); + } else blk: { + break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{ + src_info.sentinel.toValue().fmtValue(src_info.child.toType(), mod), + dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod), + }); + }; + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer sentinel", .{}); + break :msg msg; + }); } - }; - if (dest_is_slice) { - const operand_elem_size = operand_elem_ty.abiSize(mod); - const dest_elem_size = dest_elem_ty.abiSize(mod); - if (operand_elem_size != dest_elem_size) { - return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{}); + if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{ + src_info.packed_offset.host_size, + dest_info.packed_offset.host_size, + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer host size", .{}); + break :msg msg; + }); + } + + if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{ + src_info.packed_offset.bit_offset, + dest_info.packed_offset.bit_offset, + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to cast pointer bit offset", .{}); + break :msg msg; + }); + } + + check_allowzero: { + const src_allows_zero = operand_ty.ptrAllowsZero(mod); + const dest_allows_zero = dest_ty.ptrAllowsZero(mod); + if (!src_allows_zero) break :check_allowzero; + if (dest_allows_zero) break :check_allowzero; + + return 
sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "'{}' could have null values which are illegal in type '{}'", .{ + operand_ty.fmt(mod), + dest_ty.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @ptrCast to assert the pointer is not null", .{}); + break :msg msg; + }); } + + // TODO: vector index? } - if (dest_align > operand_align) { - const msg = msg: { - const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{}); - errdefer msg.destroy(sema.gpa); + const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod); + const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod); + if (!flags.align_cast) { + if (dest_align > src_align) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ + operand_ty.fmt(mod), src_align, + }); + try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{ + dest_ty.fmt(mod), dest_align, + }); + try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{}); + break :msg msg; + }); + } + } - try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(mod), operand_align, + if (!flags.addrspace_cast) { + if (src_info.flags.address_space != dest_info.flags.address_space) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "'{}' has address space '{s}'", .{ + operand_ty.fmt(mod), @tagName(src_info.flags.address_space), + }); + try sema.errNote(block, src, msg, "'{}' has address space '{s}'", .{ + dest_ty.fmt(mod), 
@tagName(dest_info.flags.address_space), + }); + try sema.errNote(block, src, msg, "use @addrSpaceCast to cast pointer address space", .{}); + break :msg msg; + }); + } + } else { + // Some address space casts are always disallowed + if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "invalid address space cast", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, operand_src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ + @tagName(src_info.flags.address_space), + @tagName(dest_info.flags.address_space), + }); + break :msg msg; }); - try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(mod), dest_align, + } + } + + if (!flags.const_cast) { + if (src_info.flags.is_const and !dest_info.flags.is_const) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @constCast to discard const qualifier", .{}); + break :msg msg; }); + } + } - try sema.errNote(block, src, msg, "consider using '@alignCast'", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + if (!flags.volatile_cast) { + if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) { + return sema.failWithOwnedErrorMsg(msg: { + const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, src, msg, "use @volatileCast to discard volatile qualifier", .{}); + break :msg msg; + }); + } } - if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) { - return sema.failWithUseOfUndef(block, operand_src); + const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) 
ptr: { + break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty); + } else operand; + + const dest_ptr_ty = if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) blk: { + // Only convert to a many-pointer at first + var info = dest_info; + info.flags.size = .Many; + const ty = try mod.ptrType(info); + if (dest_ty.zigTypeTag(mod) == .Optional) { + break :blk try mod.optionalType(ty.toIntern()); + } else { + break :blk ty; } - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); + } else dest_ty; + + // Cannot do @addrSpaceCast at comptime + if (!flags.addrspace_cast) { + if (try sema.resolveMaybeUndefVal(ptr)) |ptr_val| { + if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) { + return sema.failWithUseOfUndef(block, operand_src); + } + if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) { + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); + } + if (dest_align > src_align) { + if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { + if (addr % dest_align != 0) { + return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); + } + } + } + if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { + if (ptr_val.isUndef(mod)) return sema.addConstUndef(dest_ty); + const arr_len = try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod)); + return sema.addConstant((try mod.intern(.{ .ptr = .{ + .ty = dest_ty.toIntern(), + .addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr, + .len = arr_len.toIntern(), + } })).toValue()); + } else { + assert(dest_ptr_ty.eql(dest_ty, mod)); + return sema.addConstant(try mod.getCoerced(ptr_val, dest_ty)); + } } - return sema.addConstant(try mod.getCoerced(operand_val, aligned_dest_ty)); } try sema.requireRuntimeBlock(block, src, null); + if 
(block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and - (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) + (try sema.typeHasRuntimeBits(dest_info.child.toType()) or dest_info.child.toType().zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (operand_is_slice) ok: { - const len = try sema.analyzeSliceLen(block, operand_src, operand); + const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { + const len = try sema.analyzeSliceLen(block, operand_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); } else is_non_zero; try sema.addSafetyCheck(block, ok, .cast_to_null); } - return block.addBitCast(aligned_dest_ty, ptr); -} - -fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; - const src = LazySrcLoc.nodeOffset(extra.node); - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; - const operand = try sema.resolveInst(extra.operand); - const operand_ty = sema.typeOf(operand); - try sema.checkPtrOperand(block, operand_src, operand_ty); + if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) { + const align_minus_1 = try sema.addConstant( + try mod.intValue(Type.usize, dest_align - 1), + ); + const ptr_int = try block.addUnOp(.int_from_ptr, ptr); + const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); + const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { + const len = try 
sema.analyzeSliceLen(block, operand_src, ptr); + const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); + break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); + } else is_aligned; + try sema.addSafetyCheck(block, ok, .incorrect_alignment); + } - var ptr_info = operand_ty.ptrInfo(mod); - ptr_info.flags.is_const = false; - const dest_ty = try mod.ptrType(ptr_info); + // If we're going from an array pointer to a slice, this will only be the pointer part! + const result_ptr = if (flags.addrspace_cast) ptr: { + // We can't change address spaces with a bitcast, so this requires two instructions + var intermediate_info = src_info; + intermediate_info.flags.address_space = dest_info.flags.address_space; + const intermediate_ptr_ty = try mod.ptrType(intermediate_info); + const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { + break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); + } else intermediate_ptr_ty; + const intermediate = try block.addInst(.{ + .tag = .addrspace_cast, + .data = .{ .ty_op = .{ + .ty = try sema.addType(intermediate_ty), + .operand = ptr, + } }, + }); + if (intermediate_ty.eql(dest_ptr_ty, mod)) { + // We only changed the address space, so no need for a bitcast + break :ptr intermediate; + } + break :ptr try block.addBitCast(dest_ptr_ty, intermediate); + } else ptr: { + break :ptr try block.addBitCast(dest_ptr_ty, ptr); + }; - if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(try mod.getCoerced(operand_val, dest_ty)); + if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { + // We have to construct a slice using the operand's child's array length + // Note that we know from the check at the start of the function that operand_ty is slice-like + const arr_len = try sema.addConstant( + try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod)), + ); + return block.addInst(.{ + .tag = .slice, + .data = .{ .ty_pl = .{ + .ty = try 
sema.addType(dest_ty), + .payload = try sema.addExtra(Air.Bin{ + .lhs = result_ptr, + .rhs = arr_len, + }), + } }, + }); + } else { + assert(dest_ptr_ty.eql(dest_ty, mod)); + return result_ptr; } - - try sema.requireRuntimeBlock(block, src, null); - return block.addBitCast(dest_ty, operand); } -fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { +fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const mod = sema.mod; + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -21085,11 +21319,12 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD try sema.checkPtrOperand(block, operand_src, operand_ty); var ptr_info = operand_ty.ptrInfo(mod); - ptr_info.flags.is_volatile = false; + if (flags.const_cast) ptr_info.flags.is_const = false; + if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = try mod.ptrType(ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(operand_val); + return sema.addConstant(try mod.getCoerced(operand_val, dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -21100,24 +21335,21 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); - const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const dest_scalar_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); + const dest_ty = try sema.resolveCastDestType(block, src, extra.lhs, "@truncate"); + const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, src); const operand = try sema.resolveInst(extra.rhs); - const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag(mod) == .Vector; - const dest_ty = if (is_vector) - try mod.vectorType(.{ - .len = operand_ty.vectorLen(mod), - .child = dest_scalar_ty.toIntern(), - }) - else - dest_scalar_ty; - if (dest_is_comptime_int) { + const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector; + const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector; + if (operand_is_vector != dest_is_vector) { + return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }); + } + + if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.coerce(block, dest_ty, operand, operand_src); } @@ -21147,7 +21379,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ + try sema.errNote(block, src, msg, "destination type has {d} bits", .{ dest_info.bits, }); try sema.errNote(block, operand_src, msg, "operand type has {d} bits", .{ @@ -21161,7 +21393,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (try sema.resolveMaybeUndefValIntable(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); - if (!is_vector) { + if (!dest_is_vector) { return sema.addConstant(try mod.getCoerced( try 
val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), dest_ty, @@ -21182,59 +21414,6 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return block.addTyOp(.trunc, dest_ty, operand); } -fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const dest_align = try sema.resolveAlign(block, align_src, extra.lhs); - const ptr = try sema.resolveInst(extra.rhs); - const ptr_ty = sema.typeOf(ptr); - - try sema.checkPtrOperand(block, ptr_src, ptr_ty); - - var ptr_info = ptr_ty.ptrInfo(mod); - ptr_info.flags.alignment = dest_align; - var dest_ty = try mod.ptrType(ptr_info); - if (ptr_ty.zigTypeTag(mod) == .Optional) { - dest_ty = try mod.optionalType(dest_ty.toIntern()); - } - - if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { - if (try val.getUnsignedIntAdvanced(mod, null)) |addr| { - const dest_align_bytes = dest_align.toByteUnitsOptional().?; - if (addr % dest_align_bytes != 0) { - return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align_bytes }); - } - } - return sema.addConstant(try mod.getCoerced(val, dest_ty)); - } - - try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); - if (block.wantSafety() and dest_align.order(Alignment.fromNonzeroByteUnits(1)).compare(.gt) and - try sema.typeHasRuntimeBits(ptr_info.child.toType())) - { - const align_minus_1 = try sema.addConstant( - try mod.intValue(Type.usize, dest_align.toByteUnitsOptional().? 
- 1), - ); - const actual_ptr = if (ptr_ty.isSlice(mod)) - try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty) - else - ptr; - const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr); - const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); - const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); - const ok = if (ptr_ty.isSlice(mod)) ok: { - const len = try sema.analyzeSliceLen(block, ptr_src, ptr); - const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); - break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); - } else is_aligned; - try sema.addSafetyCheck(block, ok, .incorrect_alignment); - } - return sema.bitCast(block, dest_ty, ptr, ptr_src, null); -} - fn zirBitCount( sema: *Sema, block: *Block, @@ -21546,7 +21725,7 @@ fn checkPtrOperand( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional(mod)) return, + .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); @@ -21577,7 +21756,7 @@ fn checkPtrType( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional(mod)) return, + .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 159297a4e8f8..1e8ab0fd87ea 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -241,11 +241,6 @@ pub fn print( return; } try writer.writeAll("@enumFromInt("); - try print(.{ - .ty = Type.type, - .val = enum_tag.ty.toValue(), - }, writer, level - 1, mod); - try writer.writeAll(", "); try print(.{ .ty = ip.typeOf(enum_tag.int).toType(), .val = enum_tag.int.toValue(), diff --git a/src/Zir.zig b/src/Zir.zig index 301f50958a6d..45ee755d6bdb 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -230,6 +230,9 @@ pub const Inst = struct { /// 
Given an indexable type, returns the type of the element at given index. /// Uses the `bin` union field. lhs is the indexable type, rhs is the index. elem_type_index, + /// Given a pointer type, returns its element type. + /// Uses the `un_node` field. + elem_type, /// Given a pointer to an indexable object, returns the len property. This is /// used by for loops. This instruction also emits a for-loop specific compile /// error if the indexable object is not indexable. @@ -838,13 +841,12 @@ pub const Inst = struct { int_cast, /// Implements the `@ptrCast` builtin. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + /// Not every `@ptrCast` will correspond to this instruction - see also + /// `ptr_cast_full` in `Extended`. ptr_cast, /// Implements the `@truncate` builtin. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. truncate, - /// Implements the `@alignCast` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest alignment, `rhs` is operand. - align_cast, /// Implements the `@hasDecl` builtin. /// Uses the `pl_node` union field. Payload is `Bin`. 
@@ -1005,6 +1007,7 @@ pub const Inst = struct { .array_type_sentinel, .vector_type, .elem_type_index, + .elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1172,7 +1175,6 @@ pub const Inst = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .has_field, .clz, .ctz, @@ -1309,6 +1311,7 @@ pub const Inst = struct { .array_type_sentinel, .vector_type, .elem_type_index, + .elem_type, .indexable_ptr_len, .anyframe_type, .as, @@ -1454,7 +1457,6 @@ pub const Inst = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .has_field, .clz, .ctz, @@ -1539,6 +1541,7 @@ pub const Inst = struct { .array_type_sentinel = .pl_node, .vector_type = .pl_node, .elem_type_index = .bin, + .elem_type = .un_node, .indexable_ptr_len = .un_node, .anyframe_type = .un_node, .as = .bin, @@ -1717,7 +1720,6 @@ pub const Inst = struct { .int_cast = .pl_node, .ptr_cast = .pl_node, .truncate = .pl_node, - .align_cast = .pl_node, .typeof_builtin = .pl_node, .has_decl = .pl_node, @@ -1948,9 +1950,6 @@ pub const Inst = struct { /// `small` 0=>weak 1=>strong /// `operand` is payload index to `Cmpxchg`. cmpxchg, - /// Implement the builtin `@addrSpaceCast` - /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand. - addrspace_cast, /// Implement builtin `@cVaArg`. /// `operand` is payload index to `BinNode`. c_va_arg, @@ -1963,12 +1962,21 @@ pub const Inst = struct { /// Implement builtin `@cVaStart`. /// `operand` is `src_node: i32`. c_va_start, - /// Implements the `@constCast` builtin. - /// `operand` is payload index to `UnNode`. - const_cast, - /// Implements the `@volatileCast` builtin. + /// Implements the following builtins: + /// `@ptrCast`, `@alignCast`, `@addrSpaceCast`, `@constCast`, `@volatileCast`. + /// Represents an arbitrary nesting of the above builtins. Such a nesting is treated as a + /// single operation which can modify multiple components of a pointer type. + /// `operand` is payload index to `BinNode`. 
+ /// `small` contains `FullPtrCastFlags`. + /// AST node is the root of the nested casts. + /// `lhs` is dest type, `rhs` is operand. + ptr_cast_full, /// `operand` is payload index to `UnNode`. - volatile_cast, + /// `small` contains `FullPtrCastFlags`. + /// Guaranteed to only have flags where no explicit destination type is + /// required (const_cast and volatile_cast). + /// AST node is the root of the nested casts. + ptr_cast_no_dest, /// Implements the `@workItemId` builtin. /// `operand` is payload index to `UnNode`. work_item_id, @@ -2806,6 +2814,14 @@ pub const Inst = struct { dbg_var, }; + pub const FullPtrCastFlags = packed struct(u5) { + ptr_cast: bool = false, + align_cast: bool = false, + addrspace_cast: bool = false, + const_cast: bool = false, + volatile_cast: bool = false, + }; + /// Trailing: /// 0. src_node: i32, // if has_src_node /// 1. tag_type: Ref, // if has_tag_type diff --git a/src/print_zir.zig b/src/print_zir.zig index 029157818957..472461cd0481 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -154,6 +154,7 @@ const Writer = struct { .alloc, .alloc_mut, .alloc_comptime_mut, + .elem_type, .indexable_ptr_len, .anyframe_type, .bit_not, @@ -329,7 +330,6 @@ const Writer = struct { .int_cast, .ptr_cast, .truncate, - .align_cast, .div_exact, .div_floor, .div_trunc, @@ -507,8 +507,6 @@ const Writer = struct { .reify, .c_va_copy, .c_va_end, - .const_cast, - .volatile_cast, .work_item_id, .work_group_size, .work_group_id, @@ -525,7 +523,6 @@ const Writer = struct { .err_set_cast, .wasm_memory_grow, .prefetch, - .addrspace_cast, .c_va_arg, => { const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; @@ -539,6 +536,8 @@ const Writer = struct { .builtin_async_call => try self.writeBuiltinAsyncCall(stream, extended), .cmpxchg => try self.writeCmpxchg(stream, extended), + .ptr_cast_full => try self.writePtrCastFull(stream, extended), + .ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended), } } @@ -964,6 
+963,33 @@ const Writer = struct { try self.writeSrc(stream, src); } + fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + if (flags.ptr_cast) try stream.writeAll("ptr_cast, "); + if (flags.align_cast) try stream.writeAll("align_cast, "); + if (flags.addrspace_cast) try stream.writeAll("addrspace_cast, "); + if (flags.const_cast) try stream.writeAll("const_cast, "); + if (flags.volatile_cast) try stream.writeAll("volatile_cast, "); + try self.writeInstRef(stream, extra.lhs); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.rhs); + try stream.writeAll(")) "); + try self.writeSrc(stream, src); + } + + fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data; + const src = LazySrcLoc.nodeOffset(extra.node); + if (flags.const_cast) try stream.writeAll("const_cast, "); + if (flags.volatile_cast) try stream.writeAll("volatile_cast, "); + try self.writeInstRef(stream, extra.operand); + try stream.writeAll(")) "); + try self.writeSrc(stream, src); + } + fn writeAtomicLoad(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data; From 283d6509730a0ca6fae0ed07a1814f5a9237f282 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 20 Jun 2023 14:19:14 +0100 Subject: [PATCH 2/7] fmt: add rewrite for cast builtin type parameters --- lib/std/zig/render.zig | 58 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff 
--git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 0c93230d464d..72f54b3f4f1f 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1390,14 +1390,51 @@ fn renderBuiltinCall( ) Error!void { const token_tags = tree.tokens.items(.tag); - // TODO remove before release of 0.11.0 + // TODO remove before release of 0.12.0 const slice = tree.tokenSlice(builtin_token); + const rewrite_two_param_cast = params.len == 2 and for ([_][]const u8{ + "@bitCast", + "@errSetCast", + "@floatCast", + "@intCast", + "@ptrCast", + "@intFromFloat", + "@floatToInt", + "@enumFromInt", + "@intToEnum", + "@floatFromInt", + "@intToFloat", + "@ptrFromInt", + "@intToPtr", + "@truncate", + }) |name| { + if (mem.eql(u8, slice, name)) break true; + } else false; + + if (rewrite_two_param_cast) { + const after_last_param_token = tree.lastToken(params[1]) + 1; + if (token_tags[after_last_param_token] != .comma) { + // Render all on one line, no trailing comma. + try ais.writer().writeAll("@as"); + try renderToken(ais, tree, builtin_token + 1, .none); // ( + try renderExpression(gpa, ais, tree, params[0], .comma_space); + } else { + // Render one param per line. 
+ try ais.writer().writeAll("@as"); + ais.pushIndent(); + try renderToken(ais, tree, builtin_token + 1, .newline); // ( + try renderExpression(gpa, ais, tree, params[0], .comma); + } + } + // Corresponding logic below builtin name rewrite below + + // TODO remove before release of 0.11.0 if (mem.eql(u8, slice, "@maximum")) { try ais.writer().writeAll("@max"); } else if (mem.eql(u8, slice, "@minimum")) { try ais.writer().writeAll("@min"); } - // + // TODO remove before release of 0.12.0 else if (mem.eql(u8, slice, "@boolToInt")) { try ais.writer().writeAll("@intFromBool"); } else if (mem.eql(u8, slice, "@enumToInt")) { @@ -1420,6 +1457,23 @@ fn renderBuiltinCall( try renderToken(ais, tree, builtin_token, .none); // @name } + if (rewrite_two_param_cast) { + // Matches with corresponding logic above builtin name rewrite + const after_last_param_token = tree.lastToken(params[1]) + 1; + try ais.writer().writeAll("("); + try renderExpression(gpa, ais, tree, params[1], .none); + try ais.writer().writeAll(")"); + if (token_tags[after_last_param_token] != .comma) { + // Render all on one line, no trailing comma. + return renderToken(ais, tree, after_last_param_token, space); // ) + } else { + // Render one param per line. 
+ ais.popIndent(); + try renderToken(ais, tree, after_last_param_token, .newline); // , + return renderToken(ais, tree, after_last_param_token + 1, space); // ) + } + } + if (params.len == 0) { try renderToken(ais, tree, builtin_token + 1, .none); // ( return renderToken(ais, tree, builtin_token + 2, space); // ) From 447ca4e3fff021f471b748187b53f0a4744ad0bc Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 22 Jun 2023 22:40:13 +0100 Subject: [PATCH 3/7] translate-c: update to new cast builtin syntax --- src/translate_c.zig | 146 +++++++++++++++++++++++----------------- src/translate_c/ast.zig | 88 ++++++++---------------- 2 files changed, 113 insertions(+), 121 deletions(-) diff --git a/src/translate_c.zig b/src/translate_c.zig index 8d5804c5e56d..4078bd0f346c 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -1010,17 +1010,23 @@ fn buildFlexibleArrayFn( const bit_offset = layout.getFieldOffset(field_index); // this is a target-specific constant based on the struct layout const byte_offset = bit_offset / 8; - const casted_self = try Tag.ptr_cast.create(c.arena, .{ + const casted_self = try Tag.as.create(c.arena, .{ .lhs = intermediate_type_ident, - .rhs = self_param, + .rhs = try Tag.ptr_cast.create(c.arena, self_param), }); const field_offset = try transCreateNodeNumber(c, byte_offset, .int); const field_ptr = try Tag.add.create(c.arena, .{ .lhs = casted_self, .rhs = field_offset }); - const alignment = try Tag.alignof.create(c.arena, element_type); - - const ptr_val = try Tag.align_cast.create(c.arena, .{ .lhs = alignment, .rhs = field_ptr }); - const ptr_cast = try Tag.ptr_cast.create(c.arena, .{ .lhs = return_type_ident, .rhs = ptr_val }); + const ptr_cast = try Tag.as.create(c.arena, .{ + .lhs = return_type_ident, + .rhs = try Tag.ptr_cast.create( + c.arena, + try Tag.align_cast.create( + c.arena, + field_ptr, + ), + ), + }); const return_stmt = try Tag.@"return".create(c.arena, ptr_cast); try block_scope.statements.append(return_stmt); @@ -1579,14 
+1585,14 @@ fn transOffsetOfExpr( /// pointer arithmetic expressions, where wraparound will ensure we get the correct value. /// node -> @bitCast(usize, @intCast(isize, node)) fn usizeCastForWrappingPtrArithmetic(gpa: mem.Allocator, node: Node) TransError!Node { - const intcast_node = try Tag.int_cast.create(gpa, .{ + const intcast_node = try Tag.as.create(gpa, .{ .lhs = try Tag.type.create(gpa, "isize"), - .rhs = node, + .rhs = try Tag.int_cast.create(gpa, node), }); - return Tag.bit_cast.create(gpa, .{ + return Tag.as.create(gpa, .{ .lhs = try Tag.type.create(gpa, "usize"), - .rhs = intcast_node, + .rhs = try Tag.bit_cast.create(gpa, intcast_node), }); } @@ -1781,7 +1787,10 @@ fn transBinaryOperator( const elem_type = c_pointer.castTag(.c_pointer).?.data.elem_type; const sizeof = try Tag.sizeof.create(c.arena, elem_type); - const bitcast = try Tag.bit_cast.create(c.arena, .{ .lhs = ptrdiff_type, .rhs = infixOpNode }); + const bitcast = try Tag.as.create(c.arena, .{ + .lhs = ptrdiff_type, + .rhs = try Tag.bit_cast.create(c.arena, infixOpNode), + }); return Tag.div_exact.create(c.arena, .{ .lhs = bitcast, @@ -2310,7 +2319,7 @@ fn transIntegerLiteral( // unsigned char y = 256; // How this gets evaluated is the 256 is an integer, which gets truncated to signed char, then bit-casted // to unsigned char, resulting in 0. In order for this to work, we have to emit this zig code: - // var y = @bitCast(u8, @truncate(i8, @as(c_int, 256))); + // var y = @as(u8, @bitCast(@as(i8, @truncate(@as(c_int, 256))))); // Ideally in translate-c we could flatten this out to simply: // var y: u8 = 0; // But the first step is to be correct, and the next step is to make the output more elegant. 
@@ -2501,7 +2510,10 @@ fn transCCast( .lt => { // @truncate(SameSignSmallerInt, src_int_expr) const ty_node = try transQualTypeIntWidthOf(c, dst_type, src_type_is_signed); - src_int_expr = try Tag.truncate.create(c.arena, .{ .lhs = ty_node, .rhs = src_int_expr }); + src_int_expr = try Tag.as.create(c.arena, .{ + .lhs = ty_node, + .rhs = try Tag.truncate.create(c.arena, src_int_expr), + }); }, .gt => { // @as(SameSignBiggerInt, src_int_expr) @@ -2512,36 +2524,57 @@ fn transCCast( // src_int_expr = src_int_expr }, } - // @bitCast(dest_type, intermediate_value) - return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = src_int_expr }); + // @as(dest_type, @bitCast(intermediate_value)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.bit_cast.create(c.arena, src_int_expr), + }); } if (cIsVector(src_type) or cIsVector(dst_type)) { // C cast where at least 1 operand is a vector requires them to be same size - // @bitCast(dest_type, val) - return Tag.bit_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @bitCast(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.bit_cast.create(c.arena, expr), + }); } if (cIsInteger(dst_type) and qualTypeIsPtr(src_type)) { // @intCast(dest_type, @intFromPtr(val)) const int_from_ptr = try Tag.int_from_ptr.create(c.arena, expr); - return Tag.int_cast.create(c.arena, .{ .lhs = dst_node, .rhs = int_from_ptr }); + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.int_cast.create(c.arena, int_from_ptr), + }); } if (cIsInteger(src_type) and qualTypeIsPtr(dst_type)) { - // @ptrFromInt(dest_type, val) - return Tag.ptr_from_int.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @ptrFromInt(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.ptr_from_int.create(c.arena, expr), + }); } if (cIsFloating(src_type) and cIsFloating(dst_type)) { - // @floatCast(dest_type, val) - return 
Tag.float_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @floatCast(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.float_cast.create(c.arena, expr), + }); } if (cIsFloating(src_type) and !cIsFloating(dst_type)) { - // @intFromFloat(dest_type, val) - return Tag.int_from_float.create(c.arena, .{ .lhs = dst_node, .rhs = expr }); + // @as(dest_type, @intFromFloat(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.int_from_float.create(c.arena, expr), + }); } if (!cIsFloating(src_type) and cIsFloating(dst_type)) { var rhs = expr; if (qualTypeIsBoolean(src_type)) rhs = try Tag.int_from_bool.create(c.arena, expr); - // @floatFromInt(dest_type, val) - return Tag.float_from_int.create(c.arena, .{ .lhs = dst_node, .rhs = rhs }); + // @as(dest_type, @floatFromInt(val)) + return Tag.as.create(c.arena, .{ + .lhs = dst_node, + .rhs = try Tag.float_from_int.create(c.arena, rhs), + }); } if (qualTypeIsBoolean(src_type) and !qualTypeIsBoolean(dst_type)) { // @intFromBool returns a u1 @@ -3487,9 +3520,9 @@ fn transSignedArrayAccess( const then_value = try Tag.add.create(c.arena, .{ .lhs = container_node, - .rhs = try Tag.int_cast.create(c.arena, .{ + .rhs = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "usize"), - .rhs = tmp_ref, + .rhs = try Tag.int_cast.create(c.arena, tmp_ref), }), }); @@ -3499,17 +3532,17 @@ fn transSignedArrayAccess( }); const minuend = container_node; - const signed_size = try Tag.int_cast.create(c.arena, .{ + const signed_size = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "isize"), - .rhs = tmp_ref, + .rhs = try Tag.int_cast.create(c.arena, tmp_ref), }); const to_cast = try Tag.add_wrap.create(c.arena, .{ .lhs = signed_size, .rhs = try Tag.negate.create(c.arena, Tag.one_literal.init()), }); - const bitcast_node = try Tag.bit_cast.create(c.arena, .{ + const bitcast_node = try Tag.as.create(c.arena, .{ .lhs = try 
Tag.type.create(c.arena, "usize"), - .rhs = to_cast, + .rhs = try Tag.bit_cast.create(c.arena, to_cast), }); const subtrahend = try Tag.bit_not.create(c.arena, bitcast_node); const difference = try Tag.sub.create(c.arena, .{ @@ -3566,7 +3599,13 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip const rhs = if (is_longlong or is_signed) blk: { // check if long long first so that signed long long doesn't just become unsigned long long const typeid_node = if (is_longlong) try Tag.type.create(c.arena, "usize") else try transQualTypeIntWidthOf(c, subscr_qt, false); - break :blk try Tag.int_cast.create(c.arena, .{ .lhs = typeid_node, .rhs = try transExpr(c, scope, subscr_expr, .used) }); + break :blk try Tag.as.create(c.arena, .{ + .lhs = typeid_node, + .rhs = try Tag.int_cast.create( + c.arena, + try transExpr(c, scope, subscr_expr, .used), + ), + }); } else try transExpr(c, scope, subscr_expr, .used); const node = try Tag.array_access.create(c.arena, .{ @@ -3968,8 +4007,7 @@ fn transCreateCompoundAssign( } if (is_shift) { - const cast_to_type = try qualTypeToLog2IntRef(c, scope, rhs_qt, loc); - rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node }); + rhs_node = try Tag.int_cast.create(c.arena, rhs_node); } else if (requires_int_cast) { rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node); } @@ -4008,8 +4046,7 @@ fn transCreateCompoundAssign( try block_scope.statements.append(assign); } else { if (is_shift) { - const cast_to_type = try qualTypeToLog2IntRef(c, &block_scope.base, rhs_qt, loc); - rhs_node = try Tag.int_cast.create(c.arena, .{ .lhs = cast_to_type, .rhs = rhs_node }); + rhs_node = try Tag.int_cast.create(c.arena, rhs_node); } else if (requires_int_cast) { rhs_node = try transCCast(c, &block_scope.base, loc, lhs_qt, rhs_qt, rhs_node); } @@ -4029,7 +4066,10 @@ fn transCreateCompoundAssign( // Casting away const or volatile requires us to use @ptrFromInt fn removeCVQualifiers(c: 
*Context, dst_type_node: Node, expr: Node) Error!Node { const int_from_ptr = try Tag.int_from_ptr.create(c.arena, expr); - return Tag.ptr_from_int.create(c.arena, .{ .lhs = dst_type_node, .rhs = int_from_ptr }); + return Tag.as.create(c.arena, .{ + .lhs = dst_type_node, + .rhs = try Tag.ptr_from_int.create(c.arena, int_from_ptr), + }); } fn transCPtrCast( @@ -4062,11 +4102,12 @@ fn transCPtrCast( // For opaque types a ptrCast is enough expr else blk: { - const alignof = try Tag.std_meta_alignment.create(c.arena, dst_type_node); - const align_cast = try Tag.align_cast.create(c.arena, .{ .lhs = alignof, .rhs = expr }); - break :blk align_cast; + break :blk try Tag.align_cast.create(c.arena, expr); }; - return Tag.ptr_cast.create(c.arena, .{ .lhs = dst_type_node, .rhs = rhs }); + return Tag.as.create(c.arena, .{ + .lhs = dst_type_node, + .rhs = try Tag.ptr_cast.create(c.arena, rhs), + }); } } @@ -4337,19 +4378,6 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { } } -fn qualTypeToLog2IntRef(c: *Context, scope: *Scope, qt: clang.QualType, source_loc: clang.SourceLocation) !Node { - const int_bit_width = try qualTypeIntBitWidth(c, qt); - - if (int_bit_width != 0) { - // we can perform the log2 now. 
- const cast_bit_width = math.log2_int(u64, int_bit_width); - return Tag.log2_int_type.create(c.arena, cast_bit_width); - } - - const zig_type = try transQualType(c, scope, qt, source_loc); - return Tag.std_math_Log2Int.create(c.arena, zig_type); -} - fn qualTypeChildIsFnProto(qt: clang.QualType) bool { const ty = qualTypeCanon(qt); @@ -4731,14 +4759,12 @@ fn transCreateNodeShiftOp( const lhs_expr = stmt.getLHS(); const rhs_expr = stmt.getRHS(); - const rhs_location = rhs_expr.getBeginLoc(); // lhs >> @as(u5, rh) const lhs = try transExpr(c, scope, lhs_expr, .used); - const rhs_type = try qualTypeToLog2IntRef(c, scope, stmt.getType(), rhs_location); const rhs = try transExprCoercing(c, scope, rhs_expr, .used); - const rhs_casted = try Tag.int_cast.create(c.arena, .{ .lhs = rhs_type, .rhs = rhs }); + const rhs_casted = try Tag.int_cast.create(c.arena, rhs); return transCreateNodeInfixOp(c, op, lhs, rhs_casted, used); } @@ -6513,9 +6539,9 @@ fn parseCPostfixExpr(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?Node) }, .LBracket => { const index_val = try macroIntFromBool(c, try parseCExpr(c, m, scope)); - const index = try Tag.int_cast.create(c.arena, .{ + const index = try Tag.as.create(c.arena, .{ .lhs = try Tag.type.create(c.arena, "usize"), - .rhs = index_val, + .rhs = try Tag.int_cast.create(c.arena, index_val), }); node = try Tag.array_access.create(c.arena, .{ .lhs = node, .rhs = index }); try m.skip(c, .RBracket); diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index c8ccfa497f52..a24bff017628 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -115,15 +115,10 @@ pub const Node = extern union { /// @import("std").zig.c_builtins. 
import_c_builtin, - log2_int_type, - /// @import("std").math.Log2Int(operand) - std_math_Log2Int, - /// @intCast(lhs, rhs) + /// @intCast(operand) int_cast, /// @import("std").zig.c_translation.promoteIntLiteral(value, type, base) helpers_promoteIntLiteral, - /// @import("std").meta.alignment(value) - std_meta_alignment, /// @import("std").zig.c_translation.signedRemainder(lhs, rhs) signed_remainder, /// @divTrunc(lhs, rhs) @@ -132,23 +127,23 @@ pub const Node = extern union { int_from_bool, /// @as(lhs, rhs) as, - /// @truncate(lhs, rhs) + /// @truncate(operand) truncate, - /// @bitCast(lhs, rhs) + /// @bitCast(operand) bit_cast, - /// @floatCast(lhs, rhs) + /// @floatCast(operand) float_cast, - /// @intFromFloat(lhs, rhs) + /// @intFromFloat(operand) int_from_float, - /// @floatFromInt(lhs, rhs) + /// @floatFromInt(operand) float_from_int, - /// @ptrFromInt(lhs, rhs) + /// @ptrFromInt(operand) ptr_from_int, /// @intFromPtr(operand) int_from_ptr, - /// @alignCast(lhs, rhs) + /// @alignCast(operand) align_cast, - /// @ptrCast(lhs, rhs) + /// @ptrCast(operand) ptr_cast, /// @divExact(lhs, rhs) div_exact, @@ -254,7 +249,6 @@ pub const Node = extern union { .@"comptime", .@"defer", .asm_simple, - .std_math_Log2Int, .negate, .negate_wrap, .bit_not, @@ -270,12 +264,20 @@ pub const Node = extern union { .switch_else, .block_single, .helpers_sizeof, - .std_meta_alignment, .int_from_bool, .sizeof, .alignof, .typeof, .typeinfo, + .align_cast, + .truncate, + .bit_cast, + .float_cast, + .int_from_float, + .float_from_int, + .ptr_from_int, + .ptr_cast, + .int_cast, => Payload.UnOp, .add, @@ -314,24 +316,15 @@ pub const Node = extern union { .bit_xor_assign, .div_trunc, .signed_remainder, - .int_cast, .as, - .truncate, - .bit_cast, - .float_cast, - .int_from_float, - .float_from_int, - .ptr_from_int, .array_cat, .ellipsis3, .assign, - .align_cast, .array_access, .std_mem_zeroinit, .helpers_flexible_array_type, .helpers_shuffle_vector_index, .vector, - .ptr_cast, .div_exact, 
.offset_of, .helpers_cast, @@ -367,7 +360,6 @@ pub const Node = extern union { .c_pointer, .single_pointer => Payload.Pointer, .array_type, .null_sentinel_array_type => Payload.Array, .arg_redecl, .alias, .fail_decl => Payload.ArgRedecl, - .log2_int_type => Payload.Log2IntType, .var_simple, .pub_var_simple, .static_local_var, .mut_str => Payload.SimpleVarDecl, .enum_constant => Payload.EnumConstant, .array_filler => Payload.ArrayFiller, @@ -644,11 +636,6 @@ pub const Payload = struct { }, }; - pub const Log2IntType = struct { - base: Payload, - data: std.math.Log2Int(u64), - }; - pub const SimpleVarDecl = struct { base: Payload, data: struct { @@ -885,11 +872,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { try c.buf.append('\n'); return @as(NodeIndex, 0); // error: integer value 0 cannot be coerced to type 'std.mem.Allocator.Error!u32' }, - .std_math_Log2Int => { - const payload = node.castTag(.std_math_Log2Int).?.data; - const import_node = try renderStdImport(c, &.{ "math", "Log2Int" }); - return renderCall(c, import_node, &.{payload}); - }, .helpers_cast => { const payload = node.castTag(.helpers_cast).?.data; const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "cast" }); @@ -900,11 +882,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "promoteIntLiteral" }); return renderCall(c, import_node, &.{ payload.type, payload.value, payload.base }); }, - .std_meta_alignment => { - const payload = node.castTag(.std_meta_alignment).?.data; - const import_node = try renderStdImport(c, &.{ "meta", "alignment" }); - return renderCall(c, import_node, &.{payload}); - }, .helpers_sizeof => { const payload = node.castTag(.helpers_sizeof).?.data; const import_node = try renderStdImport(c, &.{ "zig", "c_translation", "sizeof" }); @@ -1081,14 +1058,6 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { .data = undefined, }); }, - 
.log2_int_type => { - const payload = node.castTag(.log2_int_type).?.data; - return c.addNode(.{ - .tag = .identifier, - .main_token = try c.addTokenFmt(.identifier, "u{d}", .{payload}), - .data = undefined, - }); - }, .identifier => { const payload = node.castTag(.identifier).?.data; return c.addNode(.{ @@ -1344,7 +1313,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .int_cast => { const payload = node.castTag(.int_cast).?.data; - return renderBuiltinCall(c, "@intCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@intCast", &.{payload}); }, .signed_remainder => { const payload = node.castTag(.signed_remainder).?.data; @@ -1365,27 +1334,27 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .truncate => { const payload = node.castTag(.truncate).?.data; - return renderBuiltinCall(c, "@truncate", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@truncate", &.{payload}); }, .bit_cast => { const payload = node.castTag(.bit_cast).?.data; - return renderBuiltinCall(c, "@bitCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@bitCast", &.{payload}); }, .float_cast => { const payload = node.castTag(.float_cast).?.data; - return renderBuiltinCall(c, "@floatCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@floatCast", &.{payload}); }, .int_from_float => { const payload = node.castTag(.int_from_float).?.data; - return renderBuiltinCall(c, "@intFromFloat", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@intFromFloat", &.{payload}); }, .float_from_int => { const payload = node.castTag(.float_from_int).?.data; - return renderBuiltinCall(c, "@floatFromInt", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@floatFromInt", &.{payload}); }, .ptr_from_int => { const payload = node.castTag(.ptr_from_int).?.data; - return renderBuiltinCall(c, "@ptrFromInt", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, 
"@ptrFromInt", &.{payload}); }, .int_from_ptr => { const payload = node.castTag(.int_from_ptr).?.data; @@ -1393,11 +1362,11 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .align_cast => { const payload = node.castTag(.align_cast).?.data; - return renderBuiltinCall(c, "@alignCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@alignCast", &.{payload}); }, .ptr_cast => { const payload = node.castTag(.ptr_cast).?.data; - return renderBuiltinCall(c, "@ptrCast", &.{ payload.lhs, payload.rhs }); + return renderBuiltinCall(c, "@ptrCast", &.{payload}); }, .div_exact => { const payload = node.castTag(.div_exact).?.data; @@ -2330,14 +2299,11 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex { .float_from_int, .ptr_from_int, .std_mem_zeroes, - .std_math_Log2Int, - .log2_int_type, .int_from_ptr, .sizeof, .alignof, .typeof, .typeinfo, - .std_meta_alignment, .vector, .helpers_sizeof, .helpers_cast, From f26dda21171e26f44aeec8c59a75bbb3331eeb2e Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 22 Jun 2023 18:46:56 +0100 Subject: [PATCH 4/7] all: migrate code to new cast builtin syntax Most of this migration was performed automatically with `zig fmt`. 
There were a few exceptions which I had to manually fix: * `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten * `@truncate`'s fixup is incorrect for vectors * Test cases are not formatted, and their error locations change --- lib/compiler_rt/addf3.zig | 44 +- lib/compiler_rt/addf3_test.zig | 46 +- lib/compiler_rt/arm.zig | 2 +- lib/compiler_rt/atomics.zig | 6 +- lib/compiler_rt/aulldiv.zig | 4 +- lib/compiler_rt/aullrem.zig | 4 +- lib/compiler_rt/ceil.zig | 16 +- lib/compiler_rt/clear_cache.zig | 4 +- lib/compiler_rt/clzdi2_test.zig | 2 +- lib/compiler_rt/clzsi2_test.zig | 4 +- lib/compiler_rt/clzti2_test.zig | 2 +- lib/compiler_rt/cmptf2.zig | 12 +- lib/compiler_rt/common.zig | 26 +- lib/compiler_rt/comparef.zig | 18 +- lib/compiler_rt/cos.zig | 10 +- lib/compiler_rt/count0bits.zig | 24 +- lib/compiler_rt/ctzdi2_test.zig | 2 +- lib/compiler_rt/ctzsi2_test.zig | 2 +- lib/compiler_rt/ctzti2_test.zig | 2 +- lib/compiler_rt/divdf3.zig | 64 +- lib/compiler_rt/divdf3_test.zig | 2 +- lib/compiler_rt/divhf3.zig | 2 +- lib/compiler_rt/divsf3.zig | 58 +- lib/compiler_rt/divsf3_test.zig | 2 +- lib/compiler_rt/divtf3.zig | 72 +-- lib/compiler_rt/divtf3_test.zig | 6 +- lib/compiler_rt/divti3.zig | 6 +- lib/compiler_rt/divti3_test.zig | 8 +- lib/compiler_rt/divxf3.zig | 76 +-- lib/compiler_rt/divxf3_test.zig | 8 +- lib/compiler_rt/emutls.zig | 57 +- lib/compiler_rt/exp.zig | 22 +- lib/compiler_rt/exp2.zig | 38 +- lib/compiler_rt/extenddftf2.zig | 4 +- lib/compiler_rt/extenddfxf2.zig | 2 +- lib/compiler_rt/extendf.zig | 14 +- lib/compiler_rt/extendf_test.zig | 42 +- lib/compiler_rt/extendhfdf2.zig | 2 +- lib/compiler_rt/extendhfsf2.zig | 6 +- lib/compiler_rt/extendhftf2.zig | 2 +- lib/compiler_rt/extendhfxf2.zig | 2 +- lib/compiler_rt/extendsfdf2.zig | 4 +- lib/compiler_rt/extendsftf2.zig | 4 +- lib/compiler_rt/extendsfxf2.zig | 2 +- lib/compiler_rt/extendxftf2.zig | 4 +- lib/compiler_rt/fabs.zig | 4 +- lib/compiler_rt/ffsdi2_test.zig | 2 +- 
lib/compiler_rt/ffssi2_test.zig | 2 +- lib/compiler_rt/ffsti2_test.zig | 2 +- lib/compiler_rt/fixdfti.zig | 2 +- lib/compiler_rt/fixhfti.zig | 2 +- lib/compiler_rt/fixsfti.zig | 2 +- lib/compiler_rt/fixtfti.zig | 2 +- lib/compiler_rt/fixunsdfti.zig | 2 +- lib/compiler_rt/fixunshfti.zig | 2 +- lib/compiler_rt/fixunssfti.zig | 2 +- lib/compiler_rt/fixunstfti.zig | 2 +- lib/compiler_rt/fixunsxfti.zig | 2 +- lib/compiler_rt/fixxfti.zig | 2 +- lib/compiler_rt/float_from_int.zig | 12 +- lib/compiler_rt/float_from_int_test.zig | 96 +-- lib/compiler_rt/floattidf.zig | 2 +- lib/compiler_rt/floattihf.zig | 2 +- lib/compiler_rt/floattisf.zig | 2 +- lib/compiler_rt/floattitf.zig | 2 +- lib/compiler_rt/floattixf.zig | 2 +- lib/compiler_rt/floatuntidf.zig | 2 +- lib/compiler_rt/floatuntihf.zig | 2 +- lib/compiler_rt/floatuntisf.zig | 2 +- lib/compiler_rt/floatuntitf.zig | 2 +- lib/compiler_rt/floatuntixf.zig | 2 +- lib/compiler_rt/floor.zig | 22 +- lib/compiler_rt/fma.zig | 38 +- lib/compiler_rt/fmod.zig | 64 +- lib/compiler_rt/int.zig | 82 +-- lib/compiler_rt/int_from_float.zig | 12 +- lib/compiler_rt/log.zig | 28 +- lib/compiler_rt/log10.zig | 36 +- lib/compiler_rt/log2.zig | 36 +- lib/compiler_rt/modti3.zig | 6 +- lib/compiler_rt/modti3_test.zig | 2 +- lib/compiler_rt/mulXi3.zig | 8 +- lib/compiler_rt/mulXi3_test.zig | 16 +- lib/compiler_rt/mulf3.zig | 60 +- lib/compiler_rt/mulf3_test.zig | 52 +- lib/compiler_rt/mulo.zig | 2 +- lib/compiler_rt/mulodi4_test.zig | 48 +- lib/compiler_rt/mulosi4_test.zig | 52 +- lib/compiler_rt/muloti4_test.zig | 62 +- lib/compiler_rt/negv.zig | 2 +- lib/compiler_rt/parity.zig | 8 +- lib/compiler_rt/paritydi2_test.zig | 10 +- lib/compiler_rt/paritysi2_test.zig | 10 +- lib/compiler_rt/parityti2_test.zig | 10 +- lib/compiler_rt/popcount.zig | 4 +- lib/compiler_rt/popcountdi2_test.zig | 10 +- lib/compiler_rt/popcountsi2_test.zig | 10 +- lib/compiler_rt/popcountti2_test.zig | 10 +- lib/compiler_rt/powiXf2.zig | 2 +- lib/compiler_rt/powiXf2_test.zig | 
248 ++++---- lib/compiler_rt/rem_pio2.zig | 26 +- lib/compiler_rt/rem_pio2_large.zig | 30 +- lib/compiler_rt/rem_pio2f.zig | 10 +- lib/compiler_rt/round.zig | 16 +- lib/compiler_rt/shift.zig | 26 +- lib/compiler_rt/shift_test.zig | 578 ++++++++--------- lib/compiler_rt/sin.zig | 14 +- lib/compiler_rt/sincos.zig | 20 +- lib/compiler_rt/sqrt.zig | 32 +- lib/compiler_rt/subdf3.zig | 4 +- lib/compiler_rt/subhf3.zig | 2 +- lib/compiler_rt/subsf3.zig | 4 +- lib/compiler_rt/subtf3.zig | 2 +- lib/compiler_rt/tan.zig | 10 +- lib/compiler_rt/trig.zig | 14 +- lib/compiler_rt/trunc.zig | 28 +- lib/compiler_rt/truncdfhf2.zig | 4 +- lib/compiler_rt/truncf.zig | 40 +- lib/compiler_rt/truncf_test.zig | 42 +- lib/compiler_rt/truncsfhf2.zig | 6 +- lib/compiler_rt/trunctfhf2.zig | 2 +- lib/compiler_rt/trunctfxf2.zig | 8 +- lib/compiler_rt/truncxfhf2.zig | 2 +- lib/compiler_rt/udivmod.zig | 28 +- lib/compiler_rt/udivmodei4.zig | 14 +- lib/compiler_rt/udivmodti4.zig | 2 +- lib/compiler_rt/udivti3.zig | 2 +- lib/compiler_rt/umodti3.zig | 4 +- lib/ssp.zig | 2 +- lib/std/Build.zig | 12 +- lib/std/Build/Cache.zig | 4 +- lib/std/Build/Step.zig | 4 +- lib/std/Build/Step/CheckObject.zig | 14 +- lib/std/Build/Step/Compile.zig | 6 +- lib/std/Build/Step/Run.zig | 4 +- lib/std/Progress.zig | 4 +- lib/std/Thread.zig | 42 +- lib/std/Thread/Futex.zig | 50 +- lib/std/Thread/Mutex.zig | 6 +- lib/std/array_hash_map.zig | 46 +- lib/std/array_list.zig | 12 +- lib/std/atomic/Atomic.zig | 20 +- lib/std/atomic/queue.zig | 2 +- lib/std/atomic/stack.zig | 2 +- lib/std/base64.zig | 10 +- lib/std/bit_set.zig | 42 +- lib/std/bounded_array.zig | 2 +- lib/std/builtin.zig | 2 +- lib/std/c.zig | 2 +- lib/std/c/darwin.zig | 68 +- lib/std/c/dragonfly.zig | 20 +- lib/std/c/freebsd.zig | 22 +- lib/std/c/haiku.zig | 10 +- lib/std/c/linux.zig | 2 +- lib/std/c/netbsd.zig | 16 +- lib/std/c/openbsd.zig | 14 +- lib/std/c/solaris.zig | 28 +- lib/std/child_process.zig | 26 +- lib/std/coff.zig | 32 +- 
lib/std/compress/deflate/bits_utils.zig | 2 +- lib/std/compress/deflate/compressor.zig | 38 +- lib/std/compress/deflate/compressor_test.zig | 2 +- lib/std/compress/deflate/decompressor.zig | 86 +-- lib/std/compress/deflate/deflate_fast.zig | 92 +-- .../compress/deflate/deflate_fast_test.zig | 8 +- lib/std/compress/deflate/dict_decoder.zig | 20 +- .../compress/deflate/huffman_bit_writer.zig | 110 ++-- lib/std/compress/deflate/huffman_code.zig | 20 +- lib/std/compress/deflate/token.zig | 10 +- lib/std/compress/gzip.zig | 2 +- lib/std/compress/lzma/decode.zig | 10 +- lib/std/compress/lzma2/decode.zig | 6 +- lib/std/compress/xz.zig | 2 +- lib/std/compress/xz/block.zig | 6 +- lib/std/compress/zlib.zig | 6 +- lib/std/compress/zstandard/decode/block.zig | 14 +- lib/std/compress/zstandard/decode/fse.zig | 14 +- lib/std/compress/zstandard/decode/huffman.zig | 10 +- lib/std/compress/zstandard/decompress.zig | 8 +- lib/std/crypto/25519/curve25519.zig | 2 +- lib/std/crypto/25519/edwards25519.zig | 24 +- lib/std/crypto/25519/field.zig | 22 +- lib/std/crypto/25519/scalar.zig | 74 +-- lib/std/crypto/Certificate.zig | 22 +- lib/std/crypto/Certificate/Bundle.zig | 6 +- lib/std/crypto/Certificate/Bundle/macos.zig | 6 +- lib/std/crypto/aegis.zig | 2 +- lib/std/crypto/aes/soft.zig | 102 +-- lib/std/crypto/aes_ocb.zig | 8 +- lib/std/crypto/argon2.zig | 22 +- lib/std/crypto/ascon.zig | 4 +- lib/std/crypto/bcrypt.zig | 8 +- lib/std/crypto/benchmark.zig | 52 +- lib/std/crypto/blake2.zig | 18 +- lib/std/crypto/blake3.zig | 14 +- lib/std/crypto/chacha20.zig | 8 +- lib/std/crypto/ecdsa.zig | 6 +- lib/std/crypto/ff.zig | 70 +-- lib/std/crypto/ghash_polyval.zig | 62 +- lib/std/crypto/isap.zig | 2 +- lib/std/crypto/keccak_p.zig | 4 +- lib/std/crypto/kyber_d00.zig | 72 +-- lib/std/crypto/md5.zig | 6 +- lib/std/crypto/pbkdf2.zig | 2 +- lib/std/crypto/pcurves/common.zig | 6 +- lib/std/crypto/pcurves/p256.zig | 20 +- lib/std/crypto/pcurves/p256/p256_64.zig | 72 +-- 
.../crypto/pcurves/p256/p256_scalar_64.zig | 72 +-- lib/std/crypto/pcurves/p384.zig | 20 +- lib/std/crypto/pcurves/p384/p384_64.zig | 104 +-- .../crypto/pcurves/p384/p384_scalar_64.zig | 104 +-- lib/std/crypto/pcurves/secp256k1.zig | 32 +- .../crypto/pcurves/secp256k1/secp256k1_64.zig | 72 +-- .../pcurves/secp256k1/secp256k1_scalar_64.zig | 72 +-- lib/std/crypto/phc_encoding.zig | 2 +- lib/std/crypto/poly1305.zig | 14 +- lib/std/crypto/salsa20.zig | 4 +- lib/std/crypto/scrypt.zig | 46 +- lib/std/crypto/sha1.zig | 6 +- lib/std/crypto/sha2.zig | 20 +- lib/std/crypto/siphash.zig | 12 +- lib/std/crypto/tlcsprng.zig | 6 +- lib/std/crypto/tls.zig | 20 +- lib/std/crypto/tls/Client.zig | 56 +- lib/std/crypto/utils.zig | 16 +- lib/std/cstr.zig | 4 +- lib/std/debug.zig | 116 ++-- lib/std/dwarf.zig | 12 +- lib/std/dynamic_library.zig | 42 +- lib/std/elf.zig | 30 +- lib/std/enums.zig | 30 +- lib/std/event/lock.zig | 6 +- lib/std/event/loop.zig | 12 +- lib/std/event/rwlock.zig | 8 +- lib/std/fmt.zig | 70 +-- lib/std/fmt/errol.zig | 98 +-- lib/std/fmt/parse_float.zig | 2 +- lib/std/fmt/parse_float/common.zig | 10 +- .../fmt/parse_float/convert_eisel_lemire.zig | 16 +- lib/std/fmt/parse_float/convert_fast.zig | 10 +- lib/std/fmt/parse_float/convert_hex.zig | 2 +- lib/std/fmt/parse_float/convert_slow.zig | 12 +- lib/std/fmt/parse_float/decimal.zig | 20 +- lib/std/fmt/parse_float/parse.zig | 14 +- lib/std/fs.zig | 37 +- lib/std/fs/file.zig | 18 +- lib/std/fs/get_app_data_dir.zig | 2 +- lib/std/fs/wasi.zig | 4 +- lib/std/fs/watch.zig | 16 +- lib/std/hash/adler.zig | 2 +- lib/std/hash/auto_hash.zig | 4 +- lib/std/hash/benchmark.zig | 12 +- lib/std/hash/cityhash.zig | 26 +- lib/std/hash/crc.zig | 24 +- lib/std/hash/murmur.zig | 50 +- lib/std/hash/wyhash.zig | 6 +- lib/std/hash/xxhash.zig | 2 +- lib/std/hash_map.zig | 44 +- lib/std/heap.zig | 50 +- lib/std/heap/PageAllocator.zig | 13 +- lib/std/heap/ThreadSafeAllocator.zig | 6 +- lib/std/heap/WasmAllocator.zig | 20 +- 
lib/std/heap/WasmPageAllocator.zig | 12 +- lib/std/heap/arena_allocator.zig | 24 +- lib/std/heap/general_purpose_allocator.zig | 56 +- lib/std/heap/log_to_writer_allocator.zig | 6 +- lib/std/heap/logging_allocator.zig | 6 +- lib/std/heap/memory_pool.zig | 8 +- lib/std/http/Client.zig | 14 +- lib/std/http/Server.zig | 12 +- lib/std/http/protocol.zig | 48 +- lib/std/io.zig | 2 +- lib/std/io/bit_reader.zig | 22 +- lib/std/io/bit_writer.zig | 28 +- lib/std/io/c_writer.zig | 2 +- lib/std/io/reader.zig | 2 +- lib/std/json/scanner.zig | 8 +- lib/std/json/static.zig | 20 +- lib/std/json/stringify.zig | 4 +- lib/std/json/write_stream.zig | 6 +- lib/std/leb128.zig | 42 +- lib/std/macho.zig | 14 +- lib/std/math.zig | 86 +-- lib/std/math/acos.zig | 16 +- lib/std/math/acosh.zig | 4 +- lib/std/math/asin.zig | 12 +- lib/std/math/asinh.zig | 8 +- lib/std/math/atan.zig | 10 +- lib/std/math/atan2.zig | 16 +- lib/std/math/atanh.zig | 10 +- lib/std/math/big/int.zig | 64 +- lib/std/math/big/int_test.zig | 66 +- lib/std/math/big/rational.zig | 22 +- lib/std/math/cbrt.zig | 22 +- lib/std/math/complex/atan.zig | 4 +- lib/std/math/complex/cosh.zig | 16 +- lib/std/math/complex/exp.zig | 16 +- lib/std/math/complex/ldexp.zig | 24 +- lib/std/math/complex/sinh.zig | 16 +- lib/std/math/complex/sqrt.zig | 8 +- lib/std/math/complex/tanh.zig | 12 +- lib/std/math/copysign.zig | 6 +- lib/std/math/cosh.zig | 10 +- lib/std/math/expm1.zig | 24 +- lib/std/math/expo2.zig | 4 +- lib/std/math/float.zig | 2 +- lib/std/math/frexp.zig | 18 +- lib/std/math/hypot.zig | 18 +- lib/std/math/ilogb.zig | 8 +- lib/std/math/isfinite.zig | 2 +- lib/std/math/isinf.zig | 2 +- lib/std/math/isnormal.zig | 6 +- lib/std/math/ldexp.zig | 30 +- lib/std/math/log.zig | 4 +- lib/std/math/log10.zig | 14 +- lib/std/math/log1p.zig | 24 +- lib/std/math/modf.zig | 28 +- lib/std/math/pow.zig | 4 +- lib/std/math/signbit.zig | 2 +- lib/std/math/sinh.zig | 10 +- lib/std/math/sqrt.zig | 2 +- lib/std/math/tanh.zig | 12 +- lib/std/mem.zig | 
223 ++++--- lib/std/mem/Allocator.zig | 18 +- lib/std/meta.zig | 18 +- lib/std/meta/trailer_flags.zig | 6 +- lib/std/meta/trait.zig | 2 +- lib/std/multi_array_list.zig | 33 +- lib/std/net.zig | 78 +-- lib/std/os.zig | 250 ++++---- lib/std/os/linux.zig | 516 +++++++-------- lib/std/os/linux/bpf.zig | 30 +- lib/std/os/linux/bpf/helpers.zig | 282 ++++----- lib/std/os/linux/io_uring.zig | 101 ++- lib/std/os/linux/ioctl.zig | 2 +- lib/std/os/linux/start_pie.zig | 8 +- lib/std/os/linux/test.zig | 16 +- lib/std/os/linux/tls.zig | 6 +- lib/std/os/linux/vdso.zig | 30 +- lib/std/os/plan9.zig | 4 +- lib/std/os/test.zig | 4 +- lib/std/os/uefi.zig | 2 +- lib/std/os/uefi/pool_allocator.zig | 6 +- .../uefi/protocols/device_path_protocol.zig | 26 +- lib/std/os/uefi/protocols/file_protocol.zig | 4 +- lib/std/os/uefi/protocols/hii.zig | 2 +- .../protocols/managed_network_protocol.zig | 2 +- lib/std/os/uefi/protocols/udp6_protocol.zig | 4 +- lib/std/os/uefi/tables/boot_services.zig | 2 +- lib/std/os/wasi.zig | 6 +- lib/std/os/windows.zig | 166 ++--- lib/std/os/windows/user32.zig | 2 +- lib/std/os/windows/ws2_32.zig | 2 +- lib/std/packed_int_array.zig | 32 +- lib/std/pdb.zig | 30 +- lib/std/process.zig | 18 +- lib/std/rand.zig | 59 +- lib/std/rand/Isaac64.zig | 8 +- lib/std/rand/Pcg.zig | 10 +- lib/std/rand/RomuTrio.zig | 8 +- lib/std/rand/Sfc64.zig | 4 +- lib/std/rand/Xoroshiro128.zig | 6 +- lib/std/rand/Xoshiro256.zig | 10 +- lib/std/rand/benchmark.zig | 4 +- lib/std/rand/test.zig | 16 +- lib/std/rand/ziggurat.zig | 6 +- lib/std/segmented_list.zig | 16 +- lib/std/simd.zig | 24 +- lib/std/sort/pdq.zig | 4 +- lib/std/start.zig | 24 +- lib/std/start_windows_tls.zig | 2 +- lib/std/tar.zig | 14 +- lib/std/target.zig | 18 +- lib/std/testing/failing_allocator.zig | 6 +- lib/std/time.zig | 16 +- lib/std/time/epoch.zig | 12 +- lib/std/tz.zig | 4 +- lib/std/unicode.zig | 32 +- lib/std/unicode/throughput_test.zig | 4 +- lib/std/valgrind.zig | 2 +- lib/std/valgrind/callgrind.zig | 2 +- 
lib/std/valgrind/memcheck.zig | 22 +- lib/std/zig.zig | 2 +- lib/std/zig/Ast.zig | 10 +- lib/std/zig/CrossTarget.zig | 2 +- lib/std/zig/ErrorBundle.zig | 34 +- lib/std/zig/Parse.zig | 30 +- lib/std/zig/Server.zig | 28 +- lib/std/zig/c_builtins.zig | 20 +- lib/std/zig/c_translation.zig | 65 +- lib/std/zig/number_literal.zig | 6 +- lib/std/zig/parser_test.zig | 20 +- lib/std/zig/perf_test.zig | 6 +- lib/std/zig/render.zig | 4 +- lib/std/zig/string_literal.zig | 4 +- lib/std/zig/system/NativeTargetInfo.zig | 56 +- lib/std/zig/system/arm.zig | 14 +- lib/std/zig/system/windows.zig | 40 +- lib/std/zig/tokenizer.zig | 2 +- lib/test_runner.zig | 6 +- src/Air.zig | 26 +- src/AstGen.zig | 346 +++++----- src/Autodoc.zig | 98 +-- src/Compilation.zig | 40 +- src/InternPool.zig | 410 ++++++------ src/Liveness.zig | 68 +- src/Liveness/Verify.zig | 22 +- src/Manifest.zig | 22 +- src/Module.zig | 166 ++--- src/Package.zig | 6 +- src/Sema.zig | 590 +++++++++--------- src/TypedValue.zig | 8 +- src/Zir.zig | 44 +- src/arch/aarch64/CodeGen.zig | 176 +++--- src/arch/aarch64/Emit.zig | 44 +- src/arch/aarch64/Mir.zig | 2 +- src/arch/aarch64/bits.zig | 218 +++---- src/arch/arm/CodeGen.zig | 192 +++--- src/arch/arm/Emit.zig | 38 +- src/arch/arm/Mir.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/arm/bits.zig | 64 +- src/arch/riscv64/CodeGen.zig | 38 +- src/arch/riscv64/Emit.zig | 10 +- src/arch/riscv64/Mir.zig | 2 +- src/arch/riscv64/bits.zig | 46 +- src/arch/sparc64/CodeGen.zig | 86 +-- src/arch/sparc64/Emit.zig | 26 +- src/arch/sparc64/Mir.zig | 2 +- src/arch/sparc64/bits.zig | 80 +-- src/arch/wasm/CodeGen.zig | 328 +++++----- src/arch/wasm/Emit.zig | 22 +- src/arch/wasm/Mir.zig | 16 +- src/arch/x86_64/CodeGen.zig | 458 +++++++------- src/arch/x86_64/Emit.zig | 28 +- src/arch/x86_64/Encoding.zig | 4 +- src/arch/x86_64/Lower.zig | 10 +- src/arch/x86_64/Mir.zig | 34 +- src/arch/x86_64/abi.zig | 4 +- src/arch/x86_64/bits.zig | 32 +- src/arch/x86_64/encoder.zig | 14 +- src/clang.zig | 2 +- 
src/codegen.zig | 38 +- src/codegen/c.zig | 102 +-- src/codegen/c/type.zig | 20 +- src/codegen/llvm.zig | 264 ++++---- src/codegen/llvm/bindings.zig | 2 +- src/codegen/spirv.zig | 50 +- src/codegen/spirv/Assembler.zig | 24 +- src/codegen/spirv/Cache.zig | 124 ++-- src/codegen/spirv/Module.zig | 14 +- src/codegen/spirv/Section.zig | 30 +- src/crash_report.zig | 48 +- src/glibc.zig | 8 +- src/link/C.zig | 8 +- src/link/Coff.zig | 110 ++-- src/link/Coff/ImportTable.zig | 6 +- src/link/Coff/Relocation.zig | 24 +- src/link/Dwarf.zig | 116 ++-- src/link/Elf.zig | 104 +-- src/link/MachO.zig | 98 +-- src/link/MachO/Archive.zig | 2 +- src/link/MachO/CodeSignature.zig | 18 +- src/link/MachO/DebugSymbols.zig | 42 +- src/link/MachO/DwarfInfo.zig | 8 +- src/link/MachO/Dylib.zig | 12 +- src/link/MachO/Object.zig | 64 +- src/link/MachO/Relocation.zig | 46 +- src/link/MachO/Trie.zig | 2 +- src/link/MachO/UnwindInfo.zig | 108 ++-- src/link/MachO/ZldAtom.zig | 120 ++-- src/link/MachO/dead_strip.zig | 24 +- src/link/MachO/dyld_info/Rebase.zig | 10 +- src/link/MachO/dyld_info/bind.zig | 36 +- src/link/MachO/eh_frame.zig | 72 +-- src/link/MachO/load_commands.zig | 24 +- src/link/MachO/thunks.zig | 12 +- src/link/MachO/zld.zig | 144 ++--- src/link/Plan9.zig | 44 +- src/link/Wasm.zig | 276 ++++---- src/link/Wasm/Atom.zig | 18 +- src/link/Wasm/Object.zig | 32 +- src/link/Wasm/types.zig | 2 +- src/link/strtab.zig | 6 +- src/link/table_section.zig | 2 +- src/link/tapi/Tokenizer.zig | 4 +- src/main.zig | 18 +- src/objcopy.zig | 54 +- src/print_air.zig | 22 +- src/print_targets.zig | 4 +- src/print_zir.zig | 144 ++--- src/register_manager.zig | 4 +- src/tracy.zig | 6 +- src/translate_c.zig | 474 +++++++------- src/translate_c/ast.zig | 16 +- src/type.zig | 28 +- src/value.zig | 176 +++--- test/behavior/align.zig | 28 +- test/behavior/array.zig | 4 +- test/behavior/async_fn.zig | 10 +- test/behavior/atomics.zig | 2 +- test/behavior/basic.zig | 22 +- test/behavior/bit_shifting.zig | 6 +- 
test/behavior/bitcast.zig | 74 +-- test/behavior/bitreverse.zig | 28 +- test/behavior/bool.zig | 8 +- test/behavior/bugs/11995.zig | 2 +- test/behavior/bugs/12051.zig | 4 +- test/behavior/bugs/12119.zig | 2 +- test/behavior/bugs/12450.zig | 2 +- test/behavior/bugs/12723.zig | 2 +- test/behavior/bugs/13664.zig | 2 +- test/behavior/bugs/421.zig | 2 +- test/behavior/bugs/6781.zig | 8 +- test/behavior/bugs/718.zig | 2 +- test/behavior/bugs/726.zig | 4 +- ...n_functions_returning_void_or_noreturn.zig | 4 +- test/behavior/byteswap.zig | 32 +- test/behavior/call.zig | 2 +- test/behavior/cast.zig | 120 ++-- test/behavior/cast_int.zig | 2 +- test/behavior/comptime_memory.zig | 68 +- test/behavior/enum.zig | 36 +- test/behavior/error.zig | 4 +- test/behavior/eval.zig | 20 +- test/behavior/export.zig | 2 +- test/behavior/floatop.zig | 6 +- test/behavior/fn.zig | 8 +- test/behavior/fn_in_struct_in_comptime.zig | 2 +- test/behavior/for.zig | 10 +- test/behavior/generics.zig | 6 +- test/behavior/int128.zig | 16 +- test/behavior/math.zig | 20 +- test/behavior/memcpy.zig | 2 +- test/behavior/packed-struct.zig | 10 +- .../packed_struct_explicit_backing_int.zig | 2 +- test/behavior/pointers.zig | 24 +- test/behavior/popcount.zig | 2 +- test/behavior/ptrcast.zig | 44 +- test/behavior/ptrfromint.zig | 8 +- test/behavior/sizeof_and_typeof.zig | 4 +- test/behavior/slice.zig | 20 +- test/behavior/slice_sentinel_comptime.zig | 16 +- test/behavior/struct.zig | 20 +- test/behavior/switch.zig | 10 +- test/behavior/translate_c_macros.zig | 4 +- test/behavior/truncate.zig | 26 +- test/behavior/tuple.zig | 2 +- test/behavior/tuple_declarations.zig | 2 +- test/behavior/type.zig | 16 +- test/behavior/type_info.zig | 16 +- test/behavior/vector.zig | 2 +- test/c_abi/main.zig | 18 +- .../alignCast_expects_pointer_or_slice.zig | 5 +- .../bad_alignCast_at_comptime.zig | 6 +- ...tCast_same_size_but_bit_count_mismatch.zig | 4 +- .../compile_errors/bitCast_to_enum_type.zig | 6 +- 
...h_different_sizes_inside_an_expression.zig | 4 +- ...ast_negative_value_to_unsigned_integer.zig | 2 +- ...mpile_log_a_pointer_to_an_opaque_value.zig | 2 +- .../compile_time_null_ptr_cast.zig | 2 +- .../compile_time_undef_ptr_cast.zig | 2 +- .../comptime_call_of_function_pointer.zig | 2 +- ...atch_memory_at_target_index_terminated.zig | 4 +- ...ch_memory_at_target_index_unterminated.zig | 4 +- ...entinel_does_not_match_target-sentinel.zig | 4 +- ...e-sentinel_is_out_of_bounds_terminated.zig | 4 +- ...sentinel_is_out_of_bounds_unterminated.zig | 4 +- ...n-exhaustive_enums_checks_int_in_range.zig | 4 +- ...field_count_range_but_not_matching_tag.zig | 4 +- ..._known_at_comptime_violates_error_sets.zig | 4 +- ...xplicitly_casting_non_tag_type_to_enum.zig | 2 +- ...comptime_field_ptr_not_based_on_struct.zig | 2 +- .../field_access_of_opaque_type.zig | 2 +- .../incorrect_type_to_memset_memcpy.zig | 4 +- .../increase_pointer_alignment_in_ptrCast.zig | 8 +- ...float_conversion_to_comptime_int-float.zig | 12 +- .../intFromFloat_comptime_safety.zig | 12 +- .../intFromPtr_0_to_non_optional_pointer.zig | 2 +- .../int_to_err_non_global_invalid_number.zig | 4 +- .../integer_cast_truncates_bits.zig | 2 +- .../integer_underflow_error.zig | 4 +- .../compile_errors/invalid_float_casts.zig | 16 +- .../compile_errors/invalid_int_casts.zig | 16 +- .../invalid_non-exhaustive_enum_to_union.zig | 6 +- ..._3818_bitcast_from_parray-slice_to_u16.zig | 12 +- ...es_from_comptime_reinterpreted_pointer.zig | 2 +- .../missing_builtin_arg_in_initializer.zig | 10 +- .../non_float_passed_to_intFromFloat.zig | 2 +- .../non_int_passed_to_floatFromInt.zig | 2 +- ..._comptime_float_passed_to_intFromFloat.zig | 2 +- .../ptrCast_discards_const_qualifier.zig | 6 +- .../ptrFromInt_non_ptr_type.zig | 10 +- .../ptrFromInt_with_misaligned_address.zig | 2 +- .../compile_errors/ptrcast_to_non-pointer.zig | 4 +- ...ading_past_end_of_pointer_casted_array.zig | 2 +- ...austive_enum_with_non-integer_tag_type.zig | 
2 +- ...xhaustive_enum_with_undefined_tag_type.zig | 2 +- ...ce_cannot_have_its_bytes_reinterpreted.zig | 4 +- ...n_invalid_value_of_non-exhaustive_enum.zig | 4 +- .../compile_errors/truncate_sign_mismatch.zig | 16 +- ...ointer_coerced_to_pointer_to_opaque_{}.zig | 2 +- test/cases/enum_values.0.zig | 2 +- test/cases/enum_values.1.zig | 2 +- test/cases/error_in_nested_declaration.zig | 6 +- test/cases/int_to_ptr.0.zig | 4 +- test/cases/int_to_ptr.1.zig | 4 +- ...ment_address_space_reading_and_writing.zig | 2 +- test/cases/llvm/large_slices.zig | 2 +- test/cases/safety/@alignCast misaligned.zig | 3 +- .../@enumFromInt - no matching tag value.zig | 2 +- ...tCast error not present in destination.zig | 2 +- test/cases/safety/@intCast to u0.zig | 2 +- ...oat cannot fit - negative out of range.zig | 2 +- ...loat cannot fit - negative to unsigned.zig | 2 +- ...oat cannot fit - positive out of range.zig | 2 +- ...o to non-optional byte-aligned pointer.zig | 2 +- ...t address zero to non-optional pointer.zig | 2 +- .../@ptrFromInt with misaligned address.zig | 2 +- .../@tagName on corrupted enum value.zig | 2 +- .../@tagName on corrupted union value.zig | 2 +- ...inter casting to null function pointer.zig | 2 +- ...in cast to unsigned integer - widening.zig | 2 +- ...ot fitting in cast to unsigned integer.zig | 2 +- .../safety/signed-unsigned vector cast.zig | 2 +- ... 
sentinel mismatch - optional pointers.zig | 2 +- ...else on corrupt enum value - one prong.zig | 2 +- ...tch else on corrupt enum value - union.zig | 2 +- .../switch else on corrupt enum value.zig | 2 +- .../safety/switch on corrupted enum value.zig | 2 +- .../switch on corrupted union value.zig | 2 +- test/cases/safety/truncating vector cast.zig | 2 +- ...ast to signed integer - same bit count.zig | 2 +- .../safety/unsigned-signed vector cast.zig | 2 +- ...e does not fit in shortening cast - u0.zig | 2 +- .../value does not fit in shortening cast.zig | 2 +- test/cbe.zig | 10 +- test/compare_output.zig | 10 +- test/link/macho/dead_strip_dylibs/build.zig | 2 +- test/nvptx.zig | 2 +- test/standalone/hello_world/hello_libc.zig | 2 +- test/standalone/issue_11595/main.zig | 2 +- .../main_return_error/error_u8_non_zero.zig | 2 +- test/standalone/mix_c_files/main.zig | 2 +- test/standalone/pie/main.zig | 2 +- test/translate_c.zig | 140 ++--- tools/extract-grammar.zig | 2 +- tools/gen_spirv_spec.zig | 2 +- tools/gen_stubs.zig | 10 +- tools/update-linux-headers.zig | 2 +- tools/update_clang_options.zig | 4 +- 651 files changed, 9007 insertions(+), 9079 deletions(-) diff --git a/lib/compiler_rt/addf3.zig b/lib/compiler_rt/addf3.zig index 8edfef98381c..e72294a55f16 100644 --- a/lib/compiler_rt/addf3.zig +++ b/lib/compiler_rt/addf3.zig @@ -24,28 +24,28 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { const significandMask = (@as(Z, 1) << significandBits) - 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, math.nan(T)) | quietBit; + const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit; - var aRep = @bitCast(Z, a); - var bRep = @bitCast(Z, b); + var aRep = @as(Z, @bitCast(a)); + var bRep = @as(Z, @bitCast(b)); const aAbs = aRep & absMask; const bAbs = bRep & absMask; - const infRep = @bitCast(Z, math.inf(T)); + const infRep = @as(Z, @bitCast(math.inf(T))); // Detect if a or b is zero, infinity, or NaN. 
if (aAbs -% @as(Z, 1) >= infRep - @as(Z, 1) or bAbs -% @as(Z, 1) >= infRep - @as(Z, 1)) { // NaN + anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything + NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // +/-infinity + -/+infinity = qNaN - if ((@bitCast(Z, a) ^ @bitCast(Z, b)) == signBit) { - return @bitCast(T, qnanRep); + if ((@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) == signBit) { + return @as(T, @bitCast(qnanRep)); } // +/-infinity + anything remaining = +/- infinity else { @@ -60,7 +60,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if (aAbs == 0) { // but we need to get the sign right for zero + zero if (bAbs == 0) { - return @bitCast(T, @bitCast(Z, a) & @bitCast(Z, b)); + return @as(T, @bitCast(@as(Z, @bitCast(a)) & @as(Z, @bitCast(b)))); } else { return b; } @@ -78,8 +78,8 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { } // Extract the exponent and significand from the (possibly swapped) a and b. - var aExponent = @intCast(i32, (aRep >> significandBits) & maxExponent); - var bExponent = @intCast(i32, (bRep >> significandBits) & maxExponent); + var aExponent = @as(i32, @intCast((aRep >> significandBits) & maxExponent)); + var bExponent = @as(i32, @intCast((bRep >> significandBits) & maxExponent)); var aSignificand = aRep & significandMask; var bSignificand = bRep & significandMask; @@ -101,11 +101,11 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { // Shift the significand of b by the difference in exponents, with a sticky // bottom bit to get rounding correct. 
- const @"align" = @intCast(u32, aExponent - bExponent); + const @"align" = @as(u32, @intCast(aExponent - bExponent)); if (@"align" != 0) { if (@"align" < typeWidth) { - const sticky = if (bSignificand << @intCast(S, typeWidth - @"align") != 0) @as(Z, 1) else 0; - bSignificand = (bSignificand >> @truncate(S, @"align")) | sticky; + const sticky = if (bSignificand << @as(S, @intCast(typeWidth - @"align")) != 0) @as(Z, 1) else 0; + bSignificand = (bSignificand >> @as(S, @truncate(@"align"))) | sticky; } else { bSignificand = 1; // sticky; b is known to be non-zero. } @@ -113,13 +113,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if (subtraction) { aSignificand -= bSignificand; // If a == -b, return +zero. - if (aSignificand == 0) return @bitCast(T, @as(Z, 0)); + if (aSignificand == 0) return @as(T, @bitCast(@as(Z, 0))); // If partial cancellation occured, we need to left-shift the result // and adjust the exponent: if (aSignificand < integerBit << 3) { - const shift = @intCast(i32, @clz(aSignificand)) - @intCast(i32, @clz(integerBit << 3)); - aSignificand <<= @intCast(S, shift); + const shift = @as(i32, @intCast(@clz(aSignificand))) - @as(i32, @intCast(@clz(integerBit << 3))); + aSignificand <<= @as(S, @intCast(shift)); aExponent -= shift; } } else { // addition @@ -135,13 +135,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { } // If we have overflowed the type, return +/- infinity: - if (aExponent >= maxExponent) return @bitCast(T, infRep | resultSign); + if (aExponent >= maxExponent) return @as(T, @bitCast(infRep | resultSign)); if (aExponent <= 0) { // Result is denormal; the exponent and round/sticky bits are zero. // All we need to do is shift the significand and apply the correct sign. - aSignificand >>= @intCast(S, 4 - aExponent); - return @bitCast(T, resultSign | aSignificand); + aSignificand >>= @as(S, @intCast(4 - aExponent)); + return @as(T, @bitCast(resultSign | aSignificand)); } // Low three bits are round, guard, and sticky. 
@@ -151,7 +151,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { var result = (aSignificand >> 3) & significandMask; // Insert the exponent and sign. - result |= @intCast(Z, aExponent) << significandBits; + result |= @as(Z, @intCast(aExponent)) << significandBits; result |= resultSign; // Final rounding. The result may overflow to infinity, but that is the @@ -164,7 +164,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T { if ((result >> significandBits) != 0) result |= integerBit; } - return @bitCast(T, result); + return @as(T, @bitCast(result)); } test { diff --git a/lib/compiler_rt/addf3_test.zig b/lib/compiler_rt/addf3_test.zig index 1df87a889fc6..c020ee0bc90a 100644 --- a/lib/compiler_rt/addf3_test.zig +++ b/lib/compiler_rt/addf3_test.zig @@ -5,7 +5,7 @@ const std = @import("std"); const math = std.math; -const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64); +const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64)); const __addtf3 = @import("addtf3.zig").__addtf3; const __addxf3 = @import("addxf3.zig").__addxf3; @@ -14,9 +14,9 @@ const __subtf3 = @import("subtf3.zig").__subtf3; fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { const x = __addtf3(a, b); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) { return; @@ -37,7 +37,7 @@ test "addtf3" { try test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // NaN + any = NaN - try test__addtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); + try test__addtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // inf + inf = inf try 
test__addtf3(math.inf(f128), math.inf(f128), 0x7fff000000000000, 0x0); @@ -53,9 +53,9 @@ test "addtf3" { fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { const x = __subtf3(a, b); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) { return; @@ -77,7 +77,7 @@ test "subtf3" { try test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // NaN + any = NaN - try test__subtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); + try test__subtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0); // inf - any = inf try test__subtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0); @@ -87,16 +87,16 @@ test "subtf3" { try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c); } -const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1))); +const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1)))); fn test__addxf3(a: f80, b: f80, expected: u80) !void { const x = __addxf3(a, b); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; - if (math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; // We don't currently test NaN payload propagation return error.TestFailed; @@ -104,33 +104,33 @@ fn test__addxf3(a: f80, b: f80, expected: u80) !void { test "addxf3" { // NaN + any = NaN - try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); - 
try test__addxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); + try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); + try test__addxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); // any + NaN = NaN - try test__addxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80)); - try test__addxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80)); + try test__addxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80))); + try test__addxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80))); // NaN + inf = NaN - try test__addxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80))); // inf + NaN = NaN - try test__addxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80)); + try test__addxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80))); // inf + inf = inf - try test__addxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80))); + try test__addxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // inf + -inf = NaN - try test__addxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(qnan80))); // -inf + inf = NaN - try test__addxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, qnan80)); + try test__addxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(qnan80))); // inf + any = inf - try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80))); + try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80)))); // any + inf = inf - try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80))); + try 
test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // any + any try test__addxf3(0x1.23456789abcdp+5, 0x1.dcba987654321p+5, 0x4005_BFFFFFFFFFFFC400); diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig index b358fbfa804d..811bb88d4614 100644 --- a/lib/compiler_rt/arm.zig +++ b/lib/compiler_rt/arm.zig @@ -192,6 +192,6 @@ pub fn __aeabi_ldivmod() callconv(.Naked) void { } pub fn __aeabi_drsub(a: f64, b: f64) callconv(.AAPCS) f64 { - const neg_a = @bitCast(f64, @bitCast(u64, a) ^ (@as(u64, 1) << 63)); + const neg_a = @as(f64, @bitCast(@as(u64, @bitCast(a)) ^ (@as(u64, 1) << 63))); return b + neg_a; } diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig index de0c777d4505..8e2fdbb54b5e 100644 --- a/lib/compiler_rt/atomics.zig +++ b/lib/compiler_rt/atomics.zig @@ -232,16 +232,16 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T { const addr = @intFromPtr(ptr); const wide_addr = addr & ~(@as(T, smallest_atomic_fetch_exch_size) - 1); - const wide_ptr = @alignCast(smallest_atomic_fetch_exch_size, @ptrFromInt(*WideAtomic, wide_addr)); + const wide_ptr: *align(smallest_atomic_fetch_exch_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr))); const inner_offset = addr & (@as(T, smallest_atomic_fetch_exch_size) - 1); - const inner_shift = @intCast(std.math.Log2Int(T), inner_offset * 8); + const inner_shift = @as(std.math.Log2Int(T), @intCast(inner_offset * 8)); const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift; var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst); while (true) { - const old = @truncate(T, (wide_old & mask) >> inner_shift); + const old = @as(T, @truncate((wide_old & mask) >> inner_shift)); const new = update(val, old); const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift); if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| { diff --git 
a/lib/compiler_rt/aulldiv.zig b/lib/compiler_rt/aulldiv.zig index 95e1f2eaa23d..1ce8f80c9fbc 100644 --- a/lib/compiler_rt/aulldiv.zig +++ b/lib/compiler_rt/aulldiv.zig @@ -21,9 +21,9 @@ pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = @bitCast(u64, an) / @bitCast(u64, bn); + const r = @as(u64, @bitCast(an)) / @as(u64, @bitCast(bn)); const s = s_a ^ s_b; - return (@bitCast(i64, r) ^ s) -% s; + return (@as(i64, @bitCast(r)) ^ s) -% s; } pub fn _aulldiv() callconv(.Naked) void { diff --git a/lib/compiler_rt/aullrem.zig b/lib/compiler_rt/aullrem.zig index 2bbcf6626c54..a87ec26475f9 100644 --- a/lib/compiler_rt/aullrem.zig +++ b/lib/compiler_rt/aullrem.zig @@ -21,9 +21,9 @@ pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = @bitCast(u64, an) % @bitCast(u64, bn); + const r = @as(u64, @bitCast(an)) % @as(u64, @bitCast(bn)); const s = s_a ^ s_b; - return (@bitCast(i64, r) ^ s) -% s; + return (@as(i64, @bitCast(r)) ^ s) -% s; } pub fn _aullrem() callconv(.Naked) void { diff --git a/lib/compiler_rt/ceil.zig b/lib/compiler_rt/ceil.zig index 2765ed9f741c..36ff3989161c 100644 --- a/lib/compiler_rt/ceil.zig +++ b/lib/compiler_rt/ceil.zig @@ -27,12 +27,12 @@ comptime { pub fn __ceilh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, ceilf(x)); + return @as(f16, @floatCast(ceilf(x))); } pub fn ceilf(x: f32) callconv(.C) f32 { - var u = @bitCast(u32, x); - var e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + var u = @as(u32, @bitCast(x)); + var e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; var m: u32 = undefined; // TODO: Shouldn't need this explicit check. 
@@ -43,7 +43,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { if (e >= 23) { return x; } else if (e >= 0) { - m = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } @@ -52,7 +52,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { u += m; } u &= ~m; - return @bitCast(f32, u); + return @as(f32, @bitCast(u)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 31 != 0) { @@ -66,7 +66,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 { pub fn ceil(x: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -96,13 +96,13 @@ pub fn ceil(x: f64) callconv(.C) f64 { pub fn __ceilx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, ceilq(x)); + return @as(f80, @floatCast(ceilq(x))); } pub fn ceilq(x: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); - const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig index e39d726e0f45..f2f4fc9bc2a1 100644 --- a/lib/compiler_rt/clear_cache.zig +++ b/lib/compiler_rt/clear_cache.zig @@ -102,7 +102,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void { // If CTR_EL0.IDC is set, data cache cleaning to the point of unification // is not required for instruction to data coherence. 
if (((ctr_el0 >> 28) & 0x1) == 0x0) { - const dcache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 16) & 15); + const dcache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 16) & 15)); addr = start & ~(dcache_line_size - 1); while (addr < end) : (addr += dcache_line_size) { asm volatile ("dc cvau, %[addr]" @@ -115,7 +115,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void { // If CTR_EL0.DIC is set, instruction cache invalidation to the point of // unification is not required for instruction to data coherence. if (((ctr_el0 >> 29) & 0x1) == 0x0) { - const icache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 0) & 15); + const icache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 0) & 15)); addr = start & ~(icache_line_size - 1); while (addr < end) : (addr += icache_line_size) { asm volatile ("ic ivau, %[addr]" diff --git a/lib/compiler_rt/clzdi2_test.zig b/lib/compiler_rt/clzdi2_test.zig index 1f12b1bcd0a2..c713c35755ac 100644 --- a/lib/compiler_rt/clzdi2_test.zig +++ b/lib/compiler_rt/clzdi2_test.zig @@ -2,7 +2,7 @@ const clz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__clzdi2(a: u64, expected: i64) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = clz.__clzdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/clzsi2_test.zig b/lib/compiler_rt/clzsi2_test.zig index fc0790ef7117..575952241c5c 100644 --- a/lib/compiler_rt/clzsi2_test.zig +++ b/lib/compiler_rt/clzsi2_test.zig @@ -4,8 +4,8 @@ const testing = @import("std").testing; fn test__clzsi2(a: u32, expected: i32) !void { const nakedClzsi2 = clz.__clzsi2; - const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2); - const x = @bitCast(i32, a); + const actualClzsi2 = @as(*const fn (a: i32) callconv(.C) i32, @ptrCast(&nakedClzsi2)); + const x = @as(i32, @bitCast(a)); const result = actualClzsi2(x); try 
testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/clzti2_test.zig b/lib/compiler_rt/clzti2_test.zig index 171c285a2754..ce0f26c32ec9 100644 --- a/lib/compiler_rt/clzti2_test.zig +++ b/lib/compiler_rt/clzti2_test.zig @@ -2,7 +2,7 @@ const clz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__clzti2(a: u128, expected: i64) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = clz.__clzti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/cmptf2.zig b/lib/compiler_rt/cmptf2.zig index bee06522929e..4f8ecc73b9f9 100644 --- a/lib/compiler_rt/cmptf2.zig +++ b/lib/compiler_rt/cmptf2.zig @@ -75,30 +75,30 @@ fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 { } fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Equal; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Equal; } fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) != .Equal; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) != .Equal; } fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Less; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Less; } fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool { - return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Greater; + return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Greater; } fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool { - return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) { + return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) { .Equal, .Greater => true, .Less, .Unordered => false, }; } fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool { - return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) { + return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) { .Equal, .Less => true, .Greater, 
.Unordered => false, }; diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig index eaabffa07390..752a4a46df8a 100644 --- a/lib/compiler_rt/common.zig +++ b/lib/compiler_rt/common.zig @@ -102,22 +102,22 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { u16 => { // 16x16 --> 32 bit multiply const product = @as(u32, a) * @as(u32, b); - hi.* = @intCast(u16, product >> 16); - lo.* = @truncate(u16, product); + hi.* = @as(u16, @intCast(product >> 16)); + lo.* = @as(u16, @truncate(product)); }, u32 => { // 32x32 --> 64 bit multiply const product = @as(u64, a) * @as(u64, b); - hi.* = @truncate(u32, product >> 32); - lo.* = @truncate(u32, product); + hi.* = @as(u32, @truncate(product >> 32)); + lo.* = @as(u32, @truncate(product)); }, u64 => { const S = struct { fn loWord(x: u64) u64 { - return @truncate(u32, x); + return @as(u32, @truncate(x)); } fn hiWord(x: u64) u64 { - return @truncate(u32, x >> 32); + return @as(u32, @truncate(x >> 32)); } }; // 64x64 -> 128 wide multiply for platforms that don't have such an operation; @@ -141,16 +141,16 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void { const Word_FullMask = @as(u64, 0xffffffffffffffff); const S = struct { fn Word_1(x: u128) u64 { - return @truncate(u32, x >> 96); + return @as(u32, @truncate(x >> 96)); } fn Word_2(x: u128) u64 { - return @truncate(u32, x >> 64); + return @as(u32, @truncate(x >> 64)); } fn Word_3(x: u128) u64 { - return @truncate(u32, x >> 32); + return @as(u32, @truncate(x >> 32)); } fn Word_4(x: u128) u64 { - return @truncate(u32, x); + return @as(u32, @truncate(x)); } }; // 128x128 -> 256 wide multiply for platforms that don't have such an operation; @@ -216,7 +216,7 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T); const shift = @clz(significand.*) - @clz(integerBit); - significand.* <<= @intCast(std.math.Log2Int(Z), shift); + 
significand.* <<= @as(std.math.Log2Int(Z), @intCast(shift)); return @as(i32, 1) - shift; } @@ -228,8 +228,8 @@ pub inline fn fneg(a: anytype) @TypeOf(a) { .bits = bits, } }); const sign_bit_mask = @as(U, 1) << (bits - 1); - const negated = @bitCast(U, a) ^ sign_bit_mask; - return @bitCast(F, negated); + const negated = @as(U, @bitCast(a)) ^ sign_bit_mask; + return @as(F, @bitCast(negated)); } /// Allows to access underlying bits as two equally sized lower and higher diff --git a/lib/compiler_rt/comparef.zig b/lib/compiler_rt/comparef.zig index d4f4e0504d5a..94c19dcae406 100644 --- a/lib/compiler_rt/comparef.zig +++ b/lib/compiler_rt/comparef.zig @@ -26,12 +26,12 @@ pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT { const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const absMask = signBit - 1; const infT = comptime std.math.inf(T); - const infRep = @bitCast(rep_t, infT); + const infRep = @as(rep_t, @bitCast(infT)); - const aInt = @bitCast(srep_t, a); - const bInt = @bitCast(srep_t, b); - const aAbs = @bitCast(rep_t, aInt) & absMask; - const bAbs = @bitCast(rep_t, bInt) & absMask; + const aInt = @as(srep_t, @bitCast(a)); + const bInt = @as(srep_t, @bitCast(b)); + const aAbs = @as(rep_t, @bitCast(aInt)) & absMask; + const bAbs = @as(rep_t, @bitCast(bInt)) & absMask; // If either a or b is NaN, they are unordered. 
if (aAbs > infRep or bAbs > infRep) return RT.Unordered; @@ -81,7 +81,7 @@ pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT { return .Equal; } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) { // signs are different - if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) { + if (@as(i16, @bitCast(a_rep.exp)) < @as(i16, @bitCast(b_rep.exp))) { return .Less; } else { return .Greater; @@ -104,10 +104,10 @@ pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 { const exponentBits = std.math.floatExponentBits(T); const signBit = (@as(rep_t, 1) << (significandBits + exponentBits)); const absMask = signBit - 1; - const infRep = @bitCast(rep_t, std.math.inf(T)); + const infRep = @as(rep_t, @bitCast(std.math.inf(T))); - const aAbs: rep_t = @bitCast(rep_t, a) & absMask; - const bAbs: rep_t = @bitCast(rep_t, b) & absMask; + const aAbs: rep_t = @as(rep_t, @bitCast(a)) & absMask; + const bAbs: rep_t = @as(rep_t, @bitCast(b)) & absMask; return @intFromBool(aAbs > infRep or bAbs > infRep); } diff --git a/lib/compiler_rt/cos.zig b/lib/compiler_rt/cos.zig index 029b6c346a3d..898c4fa64e8b 100644 --- a/lib/compiler_rt/cos.zig +++ b/lib/compiler_rt/cos.zig @@ -25,7 +25,7 @@ comptime { pub fn __cosh(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, cosf(a)); + return @as(f16, @floatCast(cosf(a))); } pub fn cosf(x: f32) callconv(.C) f32 { @@ -35,7 +35,7 @@ pub fn cosf(x: f32) callconv(.C) f32 { const c3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const c4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -86,7 +86,7 @@ pub fn cosf(x: f32) callconv(.C) f32 { } pub fn cos(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -116,12 +116,12 @@ pub fn cos(x: f64) callconv(.C) f64 { pub fn __cosx(a: f80) 
callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, cosq(a)); + return @as(f80, @floatCast(cosq(a))); } pub fn cosq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return cos(@floatCast(f64, a)); + return cos(@as(f64, @floatCast(a))); } pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig index e1500d010267..952d25e146a3 100644 --- a/lib/compiler_rt/count0bits.zig +++ b/lib/compiler_rt/count0bits.zig @@ -32,9 +32,9 @@ comptime { inline fn clzXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; var n: T = @bitSizeOf(T); @@ -49,7 +49,7 @@ inline fn clzXi2(comptime T: type, a: T) i32 { x = y; } } - return @intCast(i32, n - @bitCast(T, x)); + return @as(i32, @intCast(n - @as(T, @bitCast(x)))); } fn __clzsi2_thumb1() callconv(.Naked) void { @@ -169,9 +169,9 @@ pub fn __clzti2(a: i128) callconv(.C) i32 { inline fn ctzXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; var n: T = 1; @@ -187,7 +187,7 @@ inline fn ctzXi2(comptime T: type, a: T) i32 { x = x >> shift; } } - return @intCast(i32, n - @bitCast(T, (x & 1))); + return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))); } pub fn __ctzsi2(a: i32) callconv(.C) i32 { @@ -204,9 +204,9 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 { inline fn ffsXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, 
@bitCast(a)), else => unreachable, }; var n: T = 1; @@ -224,7 +224,7 @@ inline fn ffsXi2(comptime T: type, a: T) i32 { } } // return ctz + 1 - return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1); + return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))) + @as(i32, 1); } pub fn __ffssi2(a: i32) callconv(.C) i32 { diff --git a/lib/compiler_rt/ctzdi2_test.zig b/lib/compiler_rt/ctzdi2_test.zig index 4ee1dc0f7832..f5b7139b0efc 100644 --- a/lib/compiler_rt/ctzdi2_test.zig +++ b/lib/compiler_rt/ctzdi2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzdi2(a: u64, expected: i32) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = ctz.__ctzdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ctzsi2_test.zig b/lib/compiler_rt/ctzsi2_test.zig index 5d9e01f0df99..d30a15daac43 100644 --- a/lib/compiler_rt/ctzsi2_test.zig +++ b/lib/compiler_rt/ctzsi2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzsi2(a: u32, expected: i32) !void { - var x = @bitCast(i32, a); + var x = @as(i32, @bitCast(a)); var result = ctz.__ctzsi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ctzti2_test.zig b/lib/compiler_rt/ctzti2_test.zig index 4b7fbf8b1ccf..2d509f598821 100644 --- a/lib/compiler_rt/ctzti2_test.zig +++ b/lib/compiler_rt/ctzti2_test.zig @@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ctzti2(a: u128, expected: i32) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = ctz.__ctzti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig index c71eed6d0fd7..f6e65f743d8f 100644 --- a/lib/compiler_rt/divdf3.zig +++ b/lib/compiler_rt/divdf3.zig @@ -47,52 +47,52 @@ inline fn div(a: f64, b: f64) f64 { const absMask = 
signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f64)); + const infRep = @as(Z, @bitCast(std.math.inf(f64))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f64, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f64, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f64, qnanRep); + return @as(f64, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f64, aAbs | quotientSign); + return @as(f64, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f64, quotientSign); + if (bAbs == infRep) return @as(f64, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f64, qnanRep); + return @as(f64, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f64, quotientSign); + return @as(f64, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f64, infRep | quotientSign); + if (bAbs == 0) return @as(f64, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -106,13 +106,13 @@ inline fn div(a: f64, b: f64) f64 { // won't hurt anything.) 
aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q31 fixed-point number in the range // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. - const q31b: u32 = @truncate(u32, bSignificand >> 21); + const q31b: u32 = @as(u32, @truncate(bSignificand >> 21)); var recip32 = @as(u32, 0x7504f333) -% q31b; // Now refine the reciprocal estimate using a Newton-Raphson iteration: @@ -123,12 +123,12 @@ inline fn div(a: f64, b: f64) f64 { // with each iteration, so after three iterations, we have about 28 binary // digits of accuracy. var correction32: u32 = undefined; - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); - correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1); - recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); + correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1)); + recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31)); // recip32 might have overflowed to exactly zero in the preceding // computation if the high word of b is exactly 1.0. 
This would sabotage @@ -138,12 +138,12 @@ inline fn div(a: f64, b: f64) f64 { // We need to perform one more iteration to get us to 56 binary digits; // The last iteration needs to happen with extra precision. - const q63blo: u32 = @truncate(u32, bSignificand << 11); + const q63blo: u32 = @as(u32, @truncate(bSignificand << 11)); var correction: u64 = undefined; var reciprocal: u64 = undefined; correction = ~(@as(u64, recip32) *% q31b +% (@as(u64, recip32) *% q63blo >> 32)) +% 1; - const cHi = @truncate(u32, correction >> 32); - const cLo = @truncate(u32, correction); + const cHi = @as(u32, @truncate(correction >> 32)); + const cLo = @as(u32, @truncate(correction)); reciprocal = @as(u64, recip32) *% cHi +% (@as(u64, recip32) *% cLo >> 32); // We already adjusted the 32-bit estimate, now we need to adjust the final @@ -195,7 +195,7 @@ inline fn div(a: f64, b: f64) f64 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f64, infRep | quotientSign); + return @as(f64, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -206,22 +206,22 @@ inline fn div(a: f64, b: f64) f64 { absResult += round; if ((absResult & ~significandMask) != 0) { // The rounded result is normal; return it. - return @bitCast(f64, absResult | quotientSign); + return @as(f64, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
- return @bitCast(f64, quotientSign); + return @as(f64, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) > bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @bitCast(Z, @as(SignedZ, writtenExponent)) << significandBits; + absResult |= @as(Z, @bitCast(@as(SignedZ, writtenExponent))) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f64, absResult | quotientSign); + return @as(f64, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divdf3_test.zig b/lib/compiler_rt/divdf3_test.zig index 28cb0bc4dfa5..93839e1bf72d 100644 --- a/lib/compiler_rt/divdf3_test.zig +++ b/lib/compiler_rt/divdf3_test.zig @@ -6,7 +6,7 @@ const __divdf3 = @import("divdf3.zig").__divdf3; const testing = @import("std").testing; fn compareResultD(result: f64, expected: u64) bool { - const rep = @bitCast(u64, result); + const rep = @as(u64, @bitCast(result)); if (rep == expected) { return true; diff --git a/lib/compiler_rt/divhf3.zig b/lib/compiler_rt/divhf3.zig index 6bb607bef935..eaed9d1c2f2f 100644 --- a/lib/compiler_rt/divhf3.zig +++ b/lib/compiler_rt/divhf3.zig @@ -7,5 +7,5 @@ comptime { pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, divsf3.__divsf3(a, b)); + return @as(f16, @floatCast(divsf3.__divsf3(a, b))); } diff --git a/lib/compiler_rt/divsf3.zig b/lib/compiler_rt/divsf3.zig index d35220ca26bf..9e5ade823490 100644 --- a/lib/compiler_rt/divsf3.zig +++ b/lib/compiler_rt/divsf3.zig @@ -44,52 +44,52 @@ inline fn div(a: f32, b: f32) f32 { const absMask = signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f32)); + const infRep = @as(Z, @bitCast(std.math.inf(f32))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = 
@truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f32, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f32, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f32, qnanRep); + return @as(f32, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f32, aAbs | quotientSign); + return @as(f32, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f32, quotientSign); + if (bAbs == infRep) return @as(f32, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f32, qnanRep); + return @as(f32, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f32, 
quotientSign); + return @as(f32, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f32, infRep | quotientSign); + if (bAbs == 0) return @as(f32, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -103,7 +103,7 @@ inline fn div(a: f32, b: f32) f32 { // won't hurt anything.) aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q31 fixed-point number in the range // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax @@ -120,12 +120,12 @@ inline fn div(a: f32, b: f32) f32 { // with each iteration, so after three iterations, we have about 28 binary // digits of accuracy. var correction: u32 = undefined; - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); - correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1); - reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); + correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1)); + reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31)); // Exhaustive testing shows that the error in reciprocal after three steps // is 
in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our @@ -147,7 +147,7 @@ inline fn div(a: f32, b: f32) f32 { // is the error in the reciprocal of b scaled by the maximum // possible value of a. As a consequence of this error bound, // either q or nextafter(q) is the correctly rounded - var quotient: Z = @truncate(u32, @as(u64, reciprocal) *% (aSignificand << 1) >> 32); + var quotient: Z = @as(u32, @truncate(@as(u64, reciprocal) *% (aSignificand << 1) >> 32)); // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). // In either case, we are going to compute a residual of the form @@ -175,7 +175,7 @@ inline fn div(a: f32, b: f32) f32 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f32, infRep | quotientSign); + return @as(f32, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -186,22 +186,22 @@ inline fn div(a: f32, b: f32) f32 { absResult += round; if ((absResult & ~significandMask) > 0) { // The rounded result is normal; return it. - return @bitCast(f32, absResult | quotientSign); + return @as(f32, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
- return @bitCast(f32, quotientSign); + return @as(f32, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) > bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @bitCast(Z, writtenExponent) << significandBits; + absResult |= @as(Z, @bitCast(writtenExponent)) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f32, absResult | quotientSign); + return @as(f32, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divsf3_test.zig b/lib/compiler_rt/divsf3_test.zig index 0c06d4c15a4e..ff562fe54e61 100644 --- a/lib/compiler_rt/divsf3_test.zig +++ b/lib/compiler_rt/divsf3_test.zig @@ -6,7 +6,7 @@ const __divsf3 = @import("divsf3.zig").__divsf3; const testing = @import("std").testing; fn compareResultF(result: f32, expected: u32) bool { - const rep = @bitCast(u32, result); + const rep = @as(u32, @bitCast(result)); if (rep == expected) { return true; diff --git a/lib/compiler_rt/divtf3.zig b/lib/compiler_rt/divtf3.zig index 86a2f30cc82e..b979cfce96e8 100644 --- a/lib/compiler_rt/divtf3.zig +++ b/lib/compiler_rt/divtf3.zig @@ -41,52 +41,52 @@ inline fn div(a: f128, b: f128) f128 { const absMask = signBit - 1; const exponentMask = absMask ^ significandMask; const qnanRep = exponentMask | quietBit; - const infRep = @bitCast(Z, std.math.inf(f128)); + const infRep = @as(Z, @bitCast(std.math.inf(f128))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var 
aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(f128, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(f128, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(f128, qnanRep); + return @as(f128, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(f128, aAbs | quotientSign); + return @as(f128, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(f128, quotientSign); + if (bAbs == infRep) return @as(f128, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return @bitCast(f128, qnanRep); + return @as(f128, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(f128, quotientSign); + return @as(f128, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(f128, infRep | quotientSign); + if (bAbs == 0) return @as(f128, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. 
Renormalize one or both of a and b, and set scale to @@ -100,13 +100,13 @@ inline fn div(a: f128, b: f128) f128 { // won't hurt anything. aSignificand |= implicitBit; bSignificand |= implicitBit; - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q63 fixed-point number in the range // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. - const q63b = @truncate(u64, bSignificand >> 49); + const q63b = @as(u64, @truncate(bSignificand >> 49)); var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b; // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2) @@ -117,16 +117,16 @@ inline fn div(a: f128, b: f128) f128 { // This doubles the number of correct binary digits in the approximation // with each iteration. var correction64: u64 = undefined; - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, 
@truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); // The reciprocal may have overflowed to zero if the upper half of b is // exactly 1.0. This would sabatoge the full-width final stage of the @@ -135,7 +135,7 @@ inline fn div(a: f128, b: f128) f128 { // We need to perform one more iteration to get us to 112 binary digits; // The last iteration needs to happen with extra precision. - const q127blo: u64 = @truncate(u64, bSignificand << 15); + const q127blo: u64 = @as(u64, @truncate(bSignificand << 15)); var correction: u128 = undefined; var reciprocal: u128 = undefined; @@ -151,8 +151,8 @@ inline fn div(a: f128, b: f128) f128 { correction = -%(r64q63 + (r64q127 >> 64)); - const cHi = @truncate(u64, correction >> 64); - const cLo = @truncate(u64, correction); + const cHi = @as(u64, @truncate(correction >> 64)); + const cLo = @as(u64, @truncate(correction)); wideMultiply(u128, recip64, cHi, &dummy, &r64cH); wideMultiply(u128, recip64, cLo, &dummy, &r64cL); @@ -210,7 +210,7 @@ inline fn div(a: f128, b: f128) f128 { if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(f128, infRep | quotientSign); + return @as(f128, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. @@ -221,22 +221,22 @@ inline fn div(a: f128, b: f128) f128 { absResult += round; if ((absResult & ~significandMask) > 0) { // The rounded result is normal; return it. 
- return @bitCast(f128, absResult | quotientSign); + return @as(f128, @bitCast(absResult | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. - return @bitCast(f128, quotientSign); + return @as(f128, @bitCast(quotientSign)); } else { const round = @intFromBool((residual << 1) >= bSignificand); // Clear the implicit bit var absResult = quotient & significandMask; // Insert the exponent - absResult |= @intCast(Z, writtenExponent) << significandBits; + absResult |= @as(Z, @intCast(writtenExponent)) << significandBits; // Round absResult +%= round; // Insert the sign and return - return @bitCast(f128, absResult | quotientSign); + return @as(f128, @bitCast(absResult | quotientSign)); } } diff --git a/lib/compiler_rt/divtf3_test.zig b/lib/compiler_rt/divtf3_test.zig index 62204057d480..43413a918187 100644 --- a/lib/compiler_rt/divtf3_test.zig +++ b/lib/compiler_rt/divtf3_test.zig @@ -5,9 +5,9 @@ const testing = std.testing; const __divtf3 = @import("divtf3.zig").__divtf3; fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool { - const rep = @bitCast(u128, result); - const hi = @truncate(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(result)); + const hi = @as(u64, @truncate(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expectedHi and lo == expectedLo) { return true; diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig index 31302aab4d08..43bb951ab950 100644 --- a/lib/compiler_rt/divti3.zig +++ b/lib/compiler_rt/divti3.zig @@ -21,7 +21,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 { const v128 = @Vector(2, u64); fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 { - return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b))); + return @as(v128, @bitCast(div(@as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } inline fn div(a: i128, b: i128) i128 { @@ -31,9 +31,9 @@ inline fn div(a: i128, b: 
i128) i128 { const an = (a ^ s_a) -% s_a; const bn = (b ^ s_b) -% s_b; - const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null); + const r = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), null); const s = s_a ^ s_b; - return (@bitCast(i128, r) ^ s) -% s; + return (@as(i128, @bitCast(r)) ^ s) -% s; } test { diff --git a/lib/compiler_rt/divti3_test.zig b/lib/compiler_rt/divti3_test.zig index 7992e4312fac..bcf45da3f23c 100644 --- a/lib/compiler_rt/divti3_test.zig +++ b/lib/compiler_rt/divti3_test.zig @@ -14,8 +14,8 @@ test "divti3" { try test__divti3(-2, 1, -2); try test__divti3(-2, -1, 2); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 1, @bitCast(i128, @as(u128, 0x8 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -1, @bitCast(i128, @as(u128, 0x8 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -2, @bitCast(i128, @as(u128, 0x4 << 124))); - try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 2, @bitCast(i128, @as(u128, 0xc << 124))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 1, @as(i128, @bitCast(@as(u128, 0x8 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -1, @as(i128, @bitCast(@as(u128, 0x8 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -2, @as(i128, @bitCast(@as(u128, 0x4 << 124)))); + try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 2, @as(i128, @bitCast(@as(u128, 0xc << 124)))); } diff --git a/lib/compiler_rt/divxf3.zig b/lib/compiler_rt/divxf3.zig index f0e93fa3be95..d8e8a0c76dcb 100644 --- a/lib/compiler_rt/divxf3.zig +++ b/lib/compiler_rt/divxf3.zig @@ -29,53 +29,53 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { const significandMask = (@as(Z, 1) << significandBits) - 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, std.math.nan(T)) | quietBit; - const infRep = @bitCast(Z, std.math.inf(T)); + const qnanRep = @as(Z, @bitCast(std.math.nan(T))) | quietBit; + const 
infRep = @as(Z, @bitCast(std.math.inf(T))); - const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent); - const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: Z = @bitCast(Z, a) & significandMask; - var bSignificand: Z = @bitCast(Z, b) & significandMask; + var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask; + var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask; var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN / anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything / NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) { - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } // infinity / anything else = +/- infinity else { - return @bitCast(T, aAbs | quotientSign); + return @as(T, @bitCast(aAbs | quotientSign)); } } // anything else / infinity = +/- 0 - if (bAbs == infRep) return @bitCast(T, quotientSign); + if (bAbs == infRep) return @as(T, @bitCast(quotientSign)); if (aAbs == 0) { // zero / zero = NaN if (bAbs == 0) { - return 
@bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } // zero / anything else = +/- zero else { - return @bitCast(T, quotientSign); + return @as(T, @bitCast(quotientSign)); } } // anything else / zero = +/- infinity - if (bAbs == 0) return @bitCast(T, infRep | quotientSign); + if (bAbs == 0) return @as(T, @bitCast(infRep | quotientSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -83,13 +83,13 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { if (aAbs < integerBit) scale +%= normalize(T, &aSignificand); if (bAbs < integerBit) scale -%= normalize(T, &bSignificand); } - var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale; + var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale; // Align the significand of b as a Q63 fixed-point number in the range // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. - const q63b = @intCast(u64, bSignificand); + const q63b = @as(u64, @intCast(bSignificand)); var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b; // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2) @@ -100,16 +100,16 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { // This doubles the number of correct binary digits in the approximation // with each iteration. 
var correction64: u64 = undefined; - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); - correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1); - recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); + correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1)); + recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63)); // The reciprocal may have overflowed to zero if the upper half of b is // exactly 1.0. 
This would sabatoge the full-width final stage of the @@ -128,8 +128,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { correction = -%correction; - const cHi = @truncate(u64, correction >> 64); - const cLo = @truncate(u64, correction); + const cHi = @as(u64, @truncate(correction >> 64)); + const cLo = @as(u64, @truncate(correction)); var r64cH: u128 = undefined; var r64cL: u128 = undefined; @@ -164,8 +164,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { // exponent accordingly. var quotient: u64 = if (quotient128 < (integerBit << 1)) b: { quotientExponent -= 1; - break :b @intCast(u64, quotient128); - } else @intCast(u64, quotient128 >> 1); + break :b @as(u64, @intCast(quotient128)); + } else @as(u64, @intCast(quotient128 >> 1)); // We are going to compute a residual of the form // @@ -182,26 +182,26 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 { const writtenExponent = quotientExponent + exponentBias; if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. - return @bitCast(T, infRep | quotientSign); + return @as(T, @bitCast(infRep | quotientSign)); } else if (writtenExponent < 1) { if (writtenExponent == 0) { // Check whether the rounded result is normal. if (residual > (bSignificand >> 1)) { // round if (quotient == (integerBit - 1)) // If the rounded result is normal, return it - return @bitCast(T, @bitCast(Z, std.math.floatMin(T)) | quotientSign); + return @as(T, @bitCast(@as(Z, @bitCast(std.math.floatMin(T))) | quotientSign)); } } // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
- return @bitCast(T, quotientSign); + return @as(T, @bitCast(quotientSign)); } else { const round = @intFromBool(residual > (bSignificand >> 1)); // Insert the exponent - var absResult = quotient | (@intCast(Z, writtenExponent) << significandBits); + var absResult = quotient | (@as(Z, @intCast(writtenExponent)) << significandBits); // Round absResult +%= round; // Insert the sign and return - return @bitCast(T, absResult | quotientSign | integerBit); + return @as(T, @bitCast(absResult | quotientSign | integerBit)); } } diff --git a/lib/compiler_rt/divxf3_test.zig b/lib/compiler_rt/divxf3_test.zig index 0ed2b7421736..ff1cef089bcd 100644 --- a/lib/compiler_rt/divxf3_test.zig +++ b/lib/compiler_rt/divxf3_test.zig @@ -5,11 +5,11 @@ const testing = std.testing; const __divxf3 = @import("divxf3.zig").__divxf3; fn compareResult(result: f80, expected: u80) bool { - const rep = @bitCast(u80, result); + const rep = @as(u80, @bitCast(result)); if (rep == expected) return true; // test other possible NaN representations (signal NaN) - if (math.isNan(result) and math.isNan(@bitCast(f80, expected))) return true; + if (math.isNan(result) and math.isNan(@as(f80, @bitCast(expected)))) return true; return false; } @@ -25,9 +25,9 @@ fn test__divxf3(a: f80, b: f80) !void { const x = __divxf3(a, b); // Next float (assuming normal, non-zero result) - const x_plus_eps = @bitCast(f80, (@bitCast(u80, x) + 1) | integerBit); + const x_plus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) + 1) | integerBit)); // Prev float (assuming normal, non-zero result) - const x_minus_eps = @bitCast(f80, (@bitCast(u80, x) - 1) | integerBit); + const x_minus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) - 1) | integerBit)); // Make sure result is more accurate than the adjacent floats const err_x = @fabs(@mulAdd(f80, x, b, -a)); diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig index 47c71efadd9c..70eb47907015 100644 --- a/lib/compiler_rt/emutls.zig +++ b/lib/compiler_rt/emutls.zig @@ 
-33,18 +33,14 @@ pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque { const simple_allocator = struct { /// Allocate a memory chunk for requested type. Return a pointer on the data. pub fn alloc(comptime T: type) *T { - return @ptrCast(*T, @alignCast( - @alignOf(T), - advancedAlloc(@alignOf(T), @sizeOf(T)), - )); + return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T)))); } /// Allocate a slice of T, with len elements. pub fn allocSlice(comptime T: type, len: usize) []T { - return @ptrCast([*]T, @alignCast( - @alignOf(T), + return @as([*]T, @ptrCast(@alignCast( advancedAlloc(@alignOf(T), @sizeOf(T) * len), - ))[0 .. len - 1]; + )))[0 .. len - 1]; } /// Allocate a memory chunk. @@ -56,22 +52,19 @@ const simple_allocator = struct { abort(); } - return @ptrCast([*]u8, aligned_ptr); + return @as([*]u8, @ptrCast(aligned_ptr)); } /// Resize a slice. pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T { - var c_ptr: *anyopaque = @ptrCast(*anyopaque, slice.ptr); - var new_array: [*]T = @ptrCast([*]T, @alignCast( - @alignOf(T), - std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort(), - )); + var c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr)); + var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort())); return new_array[0..len]; } /// Free a memory chunk allocated with simple_allocator. pub fn free(ptr: anytype) void { - std.c.free(@ptrCast(*anyopaque, ptr)); + std.c.free(@as(*anyopaque, @ptrCast(ptr))); } }; @@ -132,20 +125,20 @@ const ObjectArray = struct { if (self.slots[index] == null) { // initialize the slot const size = control.size; - const alignment = @truncate(u29, control.alignment); + const alignment = @as(u29, @truncate(control.alignment)); var data = simple_allocator.advancedAlloc(alignment, size); errdefer simple_allocator.free(data); if (control.default_value) |value| { // default value: copy the content to newly allocated object. 
- @memcpy(data[0..size], @ptrCast([*]const u8, value)); + @memcpy(data[0..size], @as([*]const u8, @ptrCast(value))); } else { // no default: return zeroed memory. @memset(data[0..size], 0); } - self.slots[index] = @ptrCast(*anyopaque, data); + self.slots[index] = @as(*anyopaque, @ptrCast(data)); } return self.slots[index].?; @@ -180,18 +173,12 @@ const current_thread_storage = struct { /// Return casted thread specific value. fn getspecific() ?*ObjectArray { - return @ptrCast( - ?*ObjectArray, - @alignCast( - @alignOf(ObjectArray), - std.c.pthread_getspecific(current_thread_storage.key), - ), - ); + return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key))); } /// Set casted thread specific value. fn setspecific(new: ?*ObjectArray) void { - if (std.c.pthread_setspecific(current_thread_storage.key, @ptrCast(*anyopaque, new)) != 0) { + if (std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new))) != 0) { abort(); } } @@ -205,10 +192,7 @@ const current_thread_storage = struct { /// Invoked by pthread specific destructor. the passed argument is the ObjectArray pointer. 
fn deinit(arrayPtr: *anyopaque) callconv(.C) void { - var array = @ptrCast( - *ObjectArray, - @alignCast(@alignOf(ObjectArray), arrayPtr), - ); + var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr)); array.deinit(); } }; @@ -294,7 +278,7 @@ const emutls_control = extern struct { .size = @sizeOf(T), .alignment = @alignOf(T), .object = .{ .index = 0 }, - .default_value = @ptrCast(?*const anyopaque, default_value), + .default_value = @as(?*const anyopaque, @ptrCast(default_value)), }; } @@ -313,10 +297,7 @@ const emutls_control = extern struct { pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T { assert(self.size == @sizeOf(T)); assert(self.alignment == @alignOf(T)); - return @ptrCast( - *T, - @alignCast(@alignOf(T), self.getPointer()), - ); + return @ptrCast(@alignCast(self.getPointer())); } }; @@ -343,7 +324,7 @@ test "__emutls_get_address zeroed" { try expect(ctl.object.index == 0); // retrieve a variable from ctl - var x = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(ctl.object.index != 0); // index has been allocated for this ctl try expect(x.* == 0); // storage has been zeroed @@ -351,7 +332,7 @@ test "__emutls_get_address zeroed" { x.* = 1234; // retrieve a variable from ctl (same ctl) - var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(y.* == 1234); // same content that x.* try expect(x == y); // same pointer @@ -364,7 +345,7 @@ test "__emutls_get_address with default_value" { var ctl = emutls_control.init(usize, &value); try expect(ctl.object.index == 0); - var x: *usize = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(ctl.object.index != 0); try expect(x.* == 5678); // storage initialized with default value @@ 
-373,7 +354,7 @@ test "__emutls_get_address with default_value" { try expect(value == 5678); // the default value didn't change - var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); + var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl))); try expect(y.* == 9012); // the modified storage persists } diff --git a/lib/compiler_rt/exp.zig b/lib/compiler_rt/exp.zig index 32a1a84ff999..337376f7fe4d 100644 --- a/lib/compiler_rt/exp.zig +++ b/lib/compiler_rt/exp.zig @@ -27,7 +27,7 @@ comptime { pub fn __exph(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, expf(a)); + return @as(f16, @floatCast(expf(a))); } pub fn expf(x_: f32) callconv(.C) f32 { @@ -39,8 +39,8 @@ pub fn expf(x_: f32) callconv(.C) f32 { const P2 = -2.7667332906e-3; var x = x_; - var hx = @bitCast(u32, x); - const sign = @intCast(i32, hx >> 31); + var hx = @as(u32, @bitCast(x)); + const sign = @as(i32, @intCast(hx >> 31)); hx &= 0x7FFFFFFF; if (math.isNan(x)) { @@ -74,12 +74,12 @@ pub fn expf(x_: f32) callconv(.C) f32 { if (hx > 0x3EB17218) { // |x| > 1.5 * ln2 if (hx > 0x3F851592) { - k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]); + k = @as(i32, @intFromFloat(invln2 * x + half[@as(usize, @intCast(sign))])); } else { k = 1 - sign - sign; } - const fk = @floatFromInt(f32, k); + const fk = @as(f32, @floatFromInt(k)); hi = x - fk * ln2hi; lo = fk * ln2lo; x = hi - lo; @@ -117,9 +117,9 @@ pub fn exp(x_: f64) callconv(.C) f64 { const P5: f64 = 4.13813679705723846039e-08; var x = x_; - var ux = @bitCast(u64, x); + var ux = @as(u64, @bitCast(x)); var hx = ux >> 32; - const sign = @intCast(i32, hx >> 31); + const sign = @as(i32, @intCast(hx >> 31)); hx &= 0x7FFFFFFF; if (math.isNan(x)) { @@ -157,12 +157,12 @@ pub fn exp(x_: f64) callconv(.C) f64 { if (hx > 0x3FD62E42) { // |x| >= 1.5 * ln2 if (hx > 0x3FF0A2B2) { - k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]); + k = @as(i32, @intFromFloat(invln2 
* x + half[@as(usize, @intCast(sign))])); } else { k = 1 - sign - sign; } - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); hi = x - dk * ln2hi; lo = dk * ln2lo; x = hi - lo; @@ -191,12 +191,12 @@ pub fn exp(x_: f64) callconv(.C) f64 { pub fn __expx(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, expq(a)); + return @as(f80, @floatCast(expq(a))); } pub fn expq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return exp(@floatCast(f64, a)); + return exp(@as(f64, @floatCast(a))); } pub fn expl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/exp2.zig b/lib/compiler_rt/exp2.zig index 731fd7013d46..acfeff0e353b 100644 --- a/lib/compiler_rt/exp2.zig +++ b/lib/compiler_rt/exp2.zig @@ -27,18 +27,18 @@ comptime { pub fn __exp2h(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, exp2f(x)); + return @as(f16, @floatCast(exp2f(x))); } pub fn exp2f(x: f32) callconv(.C) f32 { - const tblsiz = @intCast(u32, exp2ft.len); - const redux: f32 = 0x1.8p23 / @floatFromInt(f32, tblsiz); + const tblsiz = @as(u32, @intCast(exp2ft.len)); + const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz)); const P1: f32 = 0x1.62e430p-1; const P2: f32 = 0x1.ebfbe0p-3; const P3: f32 = 0x1.c6b348p-5; const P4: f32 = 0x1.3b2c9cp-7; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); const ix = u & 0x7FFFFFFF; // |x| > 126 @@ -72,32 +72,32 @@ pub fn exp2f(x: f32) callconv(.C) f32 { // intended result but should confirm how GCC/Clang handle this to ensure. 
var uf = x + redux; - var i_0 = @bitCast(u32, uf); + var i_0 = @as(u32, @bitCast(uf)); i_0 +%= tblsiz / 2; const k = i_0 / tblsiz; - const uk = @bitCast(f64, @as(u64, 0x3FF + k) << 52); + const uk = @as(f64, @bitCast(@as(u64, 0x3FF + k) << 52)); i_0 &= tblsiz - 1; uf -= redux; const z: f64 = x - uf; - var r: f64 = exp2ft[@intCast(usize, i_0)]; + var r: f64 = exp2ft[@as(usize, @intCast(i_0))]; const t: f64 = r * z; r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4); - return @floatCast(f32, r * uk); + return @as(f32, @floatCast(r * uk)); } pub fn exp2(x: f64) callconv(.C) f64 { - const tblsiz: u32 = @intCast(u32, exp2dt.len / 2); - const redux: f64 = 0x1.8p52 / @floatFromInt(f64, tblsiz); + const tblsiz: u32 = @as(u32, @intCast(exp2dt.len / 2)); + const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz)); const P1: f64 = 0x1.62e42fefa39efp-1; const P2: f64 = 0x1.ebfbdff82c575p-3; const P3: f64 = 0x1.c6b08d704a0a6p-5; const P4: f64 = 0x1.3b2ab88f70400p-7; const P5: f64 = 0x1.5d88003875c74p-10; - const ux = @bitCast(u64, x); - const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF; + const ux = @as(u64, @bitCast(x)); + const ix = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF; // TODO: This should be handled beneath. 
if (math.isNan(x)) { @@ -119,7 +119,7 @@ pub fn exp2(x: f64) callconv(.C) f64 { if (ux >> 63 != 0) { // underflow if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) { - math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x)); + math.doNotOptimizeAway(@as(f32, @floatCast(-0x1.0p-149 / x))); } if (x <= -1075) { return 0; @@ -139,18 +139,18 @@ pub fn exp2(x: f64) callconv(.C) f64 { // reduce x var uf: f64 = x + redux; // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here - var i_0: u32 = @truncate(u32, @bitCast(u64, uf)); + var i_0: u32 = @as(u32, @truncate(@as(u64, @bitCast(uf)))); i_0 +%= tblsiz / 2; const k: u32 = i_0 / tblsiz * tblsiz; - const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz); + const ik: i32 = @divTrunc(@as(i32, @bitCast(k)), tblsiz); i_0 %= tblsiz; uf -= redux; // r = exp2(y) = exp2t[i_0] * p(z - eps[i]) var z: f64 = x - uf; - const t: f64 = exp2dt[@intCast(usize, 2 * i_0)]; - z -= exp2dt[@intCast(usize, 2 * i_0 + 1)]; + const t: f64 = exp2dt[@as(usize, @intCast(2 * i_0))]; + z -= exp2dt[@as(usize, @intCast(2 * i_0 + 1))]; const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5)))); return math.scalbn(r, ik); @@ -158,12 +158,12 @@ pub fn exp2(x: f64) callconv(.C) f64 { pub fn __exp2x(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, exp2q(x)); + return @as(f80, @floatCast(exp2q(x))); } pub fn exp2q(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return exp2(@floatCast(f64, x)); + return exp2(@as(f64, @floatCast(x))); } pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/extenddftf2.zig b/lib/compiler_rt/extenddftf2.zig index e7b2d8ed705b..af293b5ea272 100644 --- a/lib/compiler_rt/extenddftf2.zig +++ b/lib/compiler_rt/extenddftf2.zig @@ -13,9 +13,9 @@ comptime { } pub fn __extenddftf2(a: f64) callconv(.C) f128 { - return extendf(f128, f64, @bitCast(u64, a)); + return extendf(f128, f64, @as(u64, @bitCast(a))); } fn _Qp_dtoq(c: 
*f128, a: f64) callconv(.C) void { - c.* = extendf(f128, f64, @bitCast(u64, a)); + c.* = extendf(f128, f64, @as(u64, @bitCast(a))); } diff --git a/lib/compiler_rt/extenddfxf2.zig b/lib/compiler_rt/extenddfxf2.zig index c9e10d57ecec..54232d0b70c4 100644 --- a/lib/compiler_rt/extenddfxf2.zig +++ b/lib/compiler_rt/extenddfxf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extenddfxf2(a: f64) callconv(.C) f80 { - return extend_f80(f64, @bitCast(u64, a)); + return extend_f80(f64, @as(u64, @bitCast(a))); } diff --git a/lib/compiler_rt/extendf.zig b/lib/compiler_rt/extendf.zig index feafbfc89330..0d9f295ed048 100644 --- a/lib/compiler_rt/extendf.zig +++ b/lib/compiler_rt/extendf.zig @@ -33,7 +33,7 @@ pub inline fn extendf( const dstMinNormal: dst_rep_t = @as(dst_rep_t, 1) << dstSigBits; // Break a into a sign and representation of the absolute value - const aRep: src_rep_t = @bitCast(src_rep_t, a); + const aRep: src_rep_t = @as(src_rep_t, @bitCast(a)); const aAbs: src_rep_t = aRep & srcAbsMask; const sign: src_rep_t = aRep & srcSignMask; var absResult: dst_rep_t = undefined; @@ -58,10 +58,10 @@ pub inline fn extendf( // the correct adjusted exponent in the destination type. const scale: u32 = @clz(aAbs) - @clz(@as(src_rep_t, srcMinNormal)); - absResult = @as(dst_rep_t, aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale); + absResult = @as(dst_rep_t, aAbs) << @as(DstShift, @intCast(dstSigBits - srcSigBits + scale)); absResult ^= dstMinNormal; const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1; - absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits; + absResult |= @as(dst_rep_t, @intCast(resultExponent)) << dstSigBits; } else { // a is zero. absResult = 0; @@ -69,7 +69,7 @@ pub inline fn extendf( // Apply the signbit to (dst_t)abs(a). 
const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @as(dst_rep_t, sign) << (dstBits - srcBits); - return @bitCast(dst_t, result); + return @as(dst_t, @bitCast(result)); } pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 { @@ -104,7 +104,7 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI // a is a normal number. // Extend to the destination type by shifting the significand and // exponent into the proper position and rebiasing the exponent. - dst.exp = @intCast(u16, a_abs >> src_sig_bits); + dst.exp = @as(u16, @intCast(a_abs >> src_sig_bits)); dst.exp += dst_exp_bias - src_exp_bias; dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits); dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers @@ -124,9 +124,9 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI const scale: u16 = @clz(a_abs) - @clz(@as(src_rep_t, src_min_normal)); - dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale); + dst.fraction = @as(u64, a_abs) << @as(u6, @intCast(dst_sig_bits - src_sig_bits + scale)); dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers - dst.exp = @truncate(u16, a_abs >> @intCast(SrcShift, src_sig_bits - scale)); + dst.exp = @as(u16, @truncate(a_abs >> @as(SrcShift, @intCast(src_sig_bits - scale)))); dst.exp ^= 1; dst.exp |= dst_exp_bias - src_exp_bias - scale + 1; } else { diff --git a/lib/compiler_rt/extendf_test.zig b/lib/compiler_rt/extendf_test.zig index e9192ae5250c..966a0c14a6a2 100644 --- a/lib/compiler_rt/extendf_test.zig +++ b/lib/compiler_rt/extendf_test.zig @@ -11,12 +11,12 @@ const F16T = @import("./common.zig").F16T; fn test__extenddfxf2(a: f64, expected: u80) !void { const x = __extenddfxf2(a); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; // test other possible NaN representation(signal NaN) - if 
(math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; @panic("__extenddfxf2 test failure"); @@ -25,9 +25,9 @@ fn test__extenddfxf2(a: f64, expected: u80) !void { fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void { const x = __extenddftf2(a); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; @@ -45,14 +45,14 @@ fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void { } fn test__extendhfsf2(a: u16, expected: u32) !void { - const x = __extendhfsf2(@bitCast(F16T(f32), a)); - const rep = @bitCast(u32, x); + const x = __extendhfsf2(@as(F16T(f32), @bitCast(a))); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { if (rep & 0x7fffffff > 0x7f800000) { return; // NaN is always unequal. 
} - if (x == @bitCast(f32, expected)) { + if (x == @as(f32, @bitCast(expected))) { return; } } @@ -63,9 +63,9 @@ fn test__extendhfsf2(a: u16, expected: u32) !void { fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void { const x = __extendsftf2(a); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; @@ -184,35 +184,35 @@ test "extendsftf2" { } fn makeQNaN64() f64 { - return @bitCast(f64, @as(u64, 0x7ff8000000000000)); + return @as(f64, @bitCast(@as(u64, 0x7ff8000000000000))); } fn makeInf64() f64 { - return @bitCast(f64, @as(u64, 0x7ff0000000000000)); + return @as(f64, @bitCast(@as(u64, 0x7ff0000000000000))); } fn makeNaN64(rand: u64) f64 { - return @bitCast(f64, 0x7ff0000000000000 | (rand & 0xfffffffffffff)); + return @as(f64, @bitCast(0x7ff0000000000000 | (rand & 0xfffffffffffff))); } fn makeQNaN32() f32 { - return @bitCast(f32, @as(u32, 0x7fc00000)); + return @as(f32, @bitCast(@as(u32, 0x7fc00000))); } fn makeNaN32(rand: u32) f32 { - return @bitCast(f32, 0x7f800000 | (rand & 0x7fffff)); + return @as(f32, @bitCast(0x7f800000 | (rand & 0x7fffff))); } fn makeInf32() f32 { - return @bitCast(f32, @as(u32, 0x7f800000)); + return @as(f32, @bitCast(@as(u32, 0x7f800000))); } fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void { - const x = __extendhftf2(@bitCast(F16T(f128), a)); + const x = __extendhftf2(@as(F16T(f128), @bitCast(a))); - const rep = @bitCast(u128, x); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(x)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expected_hi and lo == expected_lo) return; diff --git a/lib/compiler_rt/extendhfdf2.zig b/lib/compiler_rt/extendhfdf2.zig index 
1a95002883fb..92aa3591ffb8 100644 --- a/lib/compiler_rt/extendhfdf2.zig +++ b/lib/compiler_rt/extendhfdf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.C) f64 { - return extendf(f64, f16, @bitCast(u16, a)); + return extendf(f64, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig index 86ef751c3590..eb144cc79a1a 100644 --- a/lib/compiler_rt/extendhfsf2.zig +++ b/lib/compiler_rt/extendhfsf2.zig @@ -13,13 +13,13 @@ comptime { } pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.C) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.C) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 { - return extendf(f32, f16, @bitCast(u16, a)); + return extendf(f32, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhftf2.zig b/lib/compiler_rt/extendhftf2.zig index 6479a0f52f65..3e63b94e429b 100644 --- a/lib/compiler_rt/extendhftf2.zig +++ b/lib/compiler_rt/extendhftf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __extendhftf2(a: common.F16T(f128)) callconv(.C) f128 { - return extendf(f128, f16, @bitCast(u16, a)); + return extendf(f128, f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendhfxf2.zig b/lib/compiler_rt/extendhfxf2.zig index bd685827665a..2858641d43eb 100644 --- a/lib/compiler_rt/extendhfxf2.zig +++ b/lib/compiler_rt/extendhfxf2.zig @@ -8,5 +8,5 @@ comptime { } fn __extendhfxf2(a: common.F16T(f80)) callconv(.C) f80 { - return extend_f80(f16, @bitCast(u16, a)); + return extend_f80(f16, @as(u16, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsfdf2.zig b/lib/compiler_rt/extendsfdf2.zig index 0a7ba8df531d..4ab928881809 100644 --- a/lib/compiler_rt/extendsfdf2.zig +++ b/lib/compiler_rt/extendsfdf2.zig @@ -12,9 +12,9 @@ comptime { } fn __extendsfdf2(a: 
f32) callconv(.C) f64 { - return extendf(f64, f32, @bitCast(u32, a)); + return extendf(f64, f32, @as(u32, @bitCast(a))); } fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 { - return extendf(f64, f32, @bitCast(u32, a)); + return extendf(f64, f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsftf2.zig b/lib/compiler_rt/extendsftf2.zig index a74319745a3e..37834387718e 100644 --- a/lib/compiler_rt/extendsftf2.zig +++ b/lib/compiler_rt/extendsftf2.zig @@ -13,9 +13,9 @@ comptime { } pub fn __extendsftf2(a: f32) callconv(.C) f128 { - return extendf(f128, f32, @bitCast(u32, a)); + return extendf(f128, f32, @as(u32, @bitCast(a))); } fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void { - c.* = extendf(f128, f32, @bitCast(u32, a)); + c.* = extendf(f128, f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendsfxf2.zig b/lib/compiler_rt/extendsfxf2.zig index 938e65c1bd71..f41a921f9e46 100644 --- a/lib/compiler_rt/extendsfxf2.zig +++ b/lib/compiler_rt/extendsfxf2.zig @@ -8,5 +8,5 @@ comptime { } fn __extendsfxf2(a: f32) callconv(.C) f80 { - return extend_f80(f32, @bitCast(u32, a)); + return extend_f80(f32, @as(u32, @bitCast(a))); } diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig index c3243d3018b7..3ddceb6c6306 100644 --- a/lib/compiler_rt/extendxftf2.zig +++ b/lib/compiler_rt/extendxftf2.zig @@ -39,12 +39,12 @@ fn __extendxftf2(a: f80) callconv(.C) f128 { // renormalize the significand and clear the leading bit and integer part, // then insert the correct adjusted exponent in the destination type. const scale: u32 = @clz(a_rep.fraction); - abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1); + abs_result = @as(u128, a_rep.fraction) << @as(u7, @intCast(dst_sig_bits - src_sig_bits + scale + 1)); abs_result ^= dst_min_normal; abs_result |= @as(u128, scale + 1) << dst_sig_bits; } // Apply the signbit to (dst_t)abs(a). 
const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16); - return @bitCast(f128, result); + return @as(f128, @bitCast(result)); } diff --git a/lib/compiler_rt/fabs.zig b/lib/compiler_rt/fabs.zig index b38e15e593bb..a58cb1fb08b5 100644 --- a/lib/compiler_rt/fabs.zig +++ b/lib/compiler_rt/fabs.zig @@ -51,7 +51,7 @@ pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble { inline fn generic_fabs(x: anytype) @TypeOf(x) { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - const float_bits = @bitCast(TBits, x); + const float_bits = @as(TBits, @bitCast(x)); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(T, float_bits & remove_sign); + return @as(T, @bitCast(float_bits & remove_sign)); } diff --git a/lib/compiler_rt/ffsdi2_test.zig b/lib/compiler_rt/ffsdi2_test.zig index 26d8a195e547..135052bf3903 100644 --- a/lib/compiler_rt/ffsdi2_test.zig +++ b/lib/compiler_rt/ffsdi2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffsdi2(a: u64, expected: i32) !void { - var x = @bitCast(i64, a); + var x = @as(i64, @bitCast(a)); var result = ffs.__ffsdi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ffssi2_test.zig b/lib/compiler_rt/ffssi2_test.zig index 884d7e47fcfd..38435a9e4bb8 100644 --- a/lib/compiler_rt/ffssi2_test.zig +++ b/lib/compiler_rt/ffssi2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffssi2(a: u32, expected: i32) !void { - var x = @bitCast(i32, a); + var x = @as(i32, @bitCast(a)); var result = ffs.__ffssi2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/ffsti2_test.zig b/lib/compiler_rt/ffsti2_test.zig index ce473b7f4e50..a0686b33e40d 100644 --- a/lib/compiler_rt/ffsti2_test.zig +++ b/lib/compiler_rt/ffsti2_test.zig @@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig"); const testing = 
@import("std").testing; fn test__ffsti2(a: u128, expected: i32) !void { - var x = @bitCast(i128, a); + var x = @as(i128, @bitCast(a)); var result = ffs.__ffsti2(x); try testing.expectEqual(expected, result); } diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig index c3513f6becd3..8ee7ce40c577 100644 --- a/lib/compiler_rt/fixdfti.zig +++ b/lib/compiler_rt/fixdfti.zig @@ -19,5 +19,5 @@ pub fn __fixdfti(a: f64) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig index d2b288a52da1..50fd26a9fa0c 100644 --- a/lib/compiler_rt/fixhfti.zig +++ b/lib/compiler_rt/fixhfti.zig @@ -19,5 +19,5 @@ pub fn __fixhfti(a: f16) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig index 033e5be5b8cb..9110b3ca3816 100644 --- a/lib/compiler_rt/fixsfti.zig +++ b/lib/compiler_rt/fixsfti.zig @@ -19,5 +19,5 @@ pub fn __fixsfti(a: f32) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig index c3f574ed8a1c..6c7a8170f991 100644 --- a/lib/compiler_rt/fixtfti.zig +++ b/lib/compiler_rt/fixtfti.zig @@ -21,5 +21,5 @@ pub fn __fixtfti(a: f128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/fixunsdfti.zig 
b/lib/compiler_rt/fixunsdfti.zig index 67959fb98afa..31483d91f9dc 100644 --- a/lib/compiler_rt/fixunsdfti.zig +++ b/lib/compiler_rt/fixunsdfti.zig @@ -19,5 +19,5 @@ pub fn __fixunsdfti(a: f64) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig index 5e767dc36c1e..97a1541aa399 100644 --- a/lib/compiler_rt/fixunshfti.zig +++ b/lib/compiler_rt/fixunshfti.zig @@ -19,5 +19,5 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig index 947164b36906..d99b2bfd381d 100644 --- a/lib/compiler_rt/fixunssfti.zig +++ b/lib/compiler_rt/fixunssfti.zig @@ -19,5 +19,5 @@ pub fn __fixunssfti(a: f32) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig index bf9764b1aa09..d796849b686c 100644 --- a/lib/compiler_rt/fixunstfti.zig +++ b/lib/compiler_rt/fixunstfti.zig @@ -21,5 +21,5 @@ pub fn __fixunstfti(a: f128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig index b9ed4d813284..86216aa5600a 100644 --- a/lib/compiler_rt/fixunsxfti.zig +++ b/lib/compiler_rt/fixunsxfti.zig @@ -19,5 +19,5 @@ pub fn 
__fixunsxfti(a: f80) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(u128, a)); + return @as(v2u64, @bitCast(intFromFloat(u128, a))); } diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig index c9a32d8ad4db..f04c68d2395c 100644 --- a/lib/compiler_rt/fixxfti.zig +++ b/lib/compiler_rt/fixxfti.zig @@ -19,5 +19,5 @@ pub fn __fixxfti(a: f80) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 { - return @bitCast(v2u64, intFromFloat(i128, a)); + return @as(v2u64, @bitCast(intFromFloat(i128, a))); } diff --git a/lib/compiler_rt/float_from_int.zig b/lib/compiler_rt/float_from_int.zig index 8a2c233cba87..c9ecba07f8b7 100644 --- a/lib/compiler_rt/float_from_int.zig +++ b/lib/compiler_rt/float_from_int.zig @@ -25,17 +25,17 @@ pub fn floatFromInt(comptime T: type, x: anytype) T { // Compute significand var exp = int_bits - @clz(abs_val) - 1; if (int_bits <= fractional_bits or exp <= fractional_bits) { - const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp); + const shift_amt = fractional_bits - @as(math.Log2Int(uT), @intCast(exp)); // Shift up result to line up with the significand - no rounding required - result = (@intCast(uT, abs_val) << shift_amt); + result = (@as(uT, @intCast(abs_val)) << shift_amt); result ^= implicit_bit; // Remove implicit integer bit } else { - var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits); + var shift_amt = @as(math.Log2Int(Z), @intCast(exp - fractional_bits)); const exact_tie: bool = @ctz(abs_val) == shift_amt - 1; // Shift down result and remove implicit integer bit - result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1); + result = @as(uT, @intCast((abs_val >> (shift_amt - 1)))) ^ (implicit_bit << 1); // Round result, including round-to-even for exact ties result = ((result + 1) >> 1) & ~@as(uT, @intFromBool(exact_tie)); @@ 
-43,14 +43,14 @@ pub fn floatFromInt(comptime T: type, x: anytype) T { // Compute exponent if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity - return @bitCast(T, sign_bit | @bitCast(uT, inf)); + return @as(T, @bitCast(sign_bit | @as(uT, @bitCast(inf)))); result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T); // If the result included a carry, we need to restore the explicit integer bit if (T == f80) result |= 1 << fractional_bits; - return @bitCast(T, sign_bit | result); + return @as(T, @bitCast(sign_bit | result)); } test { diff --git a/lib/compiler_rt/float_from_int_test.zig b/lib/compiler_rt/float_from_int_test.zig index bbc315c74554..734168e3c557 100644 --- a/lib/compiler_rt/float_from_int_test.zig +++ b/lib/compiler_rt/float_from_int_test.zig @@ -30,12 +30,12 @@ const __floatuntitf = @import("floatuntitf.zig").__floatuntitf; fn test__floatsisf(a: i32, expected: u32) !void { const r = __floatsisf(a); - try std.testing.expect(@bitCast(u32, r) == expected); + try std.testing.expect(@as(u32, @bitCast(r)) == expected); } fn test_one_floatunsisf(a: u32, expected: u32) !void { const r = __floatunsisf(a); - try std.testing.expect(@bitCast(u32, r) == expected); + try std.testing.expect(@as(u32, @bitCast(r)) == expected); } test "floatsisf" { @@ -43,7 +43,7 @@ test "floatsisf" { try test__floatsisf(1, 0x3f800000); try test__floatsisf(-1, 0xbf800000); try test__floatsisf(0x7FFFFFFF, 0x4f000000); - try test__floatsisf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xcf000000); + try test__floatsisf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xcf000000); } test "floatunsisf" { @@ -72,10 +72,10 @@ test "floatdisf" { try test__floatdisf(-2, -2.0); try test__floatdisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62); try test__floatdisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000008000000000)), -0x1.FFFFFEp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000010000000000)), 
-0x1.FFFFFCp+62); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000000)), -0x1.000000p+63); - try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000001)), -0x1.000000p+63); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000008000000000))), -0x1.FFFFFEp+62); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000010000000000))), -0x1.FFFFFCp+62); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -0x1.000000p+63); + try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -0x1.000000p+63); try test__floatdisf(0x0007FB72E8000000, 0x1.FEDCBAp+50); try test__floatdisf(0x0007FB72EA000000, 0x1.FEDCBAp+50); try test__floatdisf(0x0007FB72EB000000, 0x1.FEDCBAp+50); @@ -228,17 +228,17 @@ test "floatuntisf" { try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76); // Test overflow to infinity - try test__floatuntisf(@as(u128, math.maxInt(u128)), @bitCast(f32, math.inf(f32))); + try test__floatuntisf(@as(u128, math.maxInt(u128)), @as(f32, @bitCast(math.inf(f32)))); } fn test_one_floatsidf(a: i32, expected: u64) !void { const r = __floatsidf(a); - try std.testing.expect(@bitCast(u64, r) == expected); + try std.testing.expect(@as(u64, @bitCast(r)) == expected); } fn test_one_floatunsidf(a: u32, expected: u64) !void { const r = __floatunsidf(a); - try std.testing.expect(@bitCast(u64, r) == expected); + try std.testing.expect(@as(u64, @bitCast(r)) == expected); } test "floatsidf" { @@ -246,15 +246,15 @@ test "floatsidf" { try test_one_floatsidf(1, 0x3ff0000000000000); try test_one_floatsidf(-1, 0xbff0000000000000); try test_one_floatsidf(0x7FFFFFFF, 0x41dfffffffc00000); - try test_one_floatsidf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc1e0000000000000); + try test_one_floatsidf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc1e0000000000000); } test "floatunsidf" { try test_one_floatunsidf(0, 0x0000000000000000); try test_one_floatunsidf(1, 0x3ff0000000000000); try 
test_one_floatunsidf(0x7FFFFFFF, 0x41dfffffffc00000); - try test_one_floatunsidf(@intCast(u32, 0x80000000), 0x41e0000000000000); - try test_one_floatunsidf(@intCast(u32, 0xFFFFFFFF), 0x41efffffffe00000); + try test_one_floatunsidf(@as(u32, @intCast(0x80000000)), 0x41e0000000000000); + try test_one_floatunsidf(@as(u32, @intCast(0xFFFFFFFF)), 0x41efffffffe00000); } fn test__floatdidf(a: i64, expected: f64) !void { @@ -279,12 +279,12 @@ test "floatdidf" { try test__floatdidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62); try test__floatdidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); try test__floatdidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000008000000000)), -0x1.FFFFFEp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000800)), -0x1.FFFFFFFFFFFFEp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000010000000000)), -0x1.FFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000001000)), -0x1.FFFFFFFFFFFFCp+62); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000000)), -0x1.000000p+63); - try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000001)), -0x1.000000p+63); // 0x8000000000000001 + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000008000000000)))), -0x1.FFFFFEp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000800)))), -0x1.FFFFFFFFFFFFEp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000010000000000)))), -0x1.FFFFFCp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000001000)))), -0x1.FFFFFFFFFFFFCp+62); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))), -0x1.000000p+63); + try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000001)))), -0x1.000000p+63); // 0x8000000000000001 try test__floatdidf(0x0007FB72E8000000, 0x1.FEDCBAp+50); try test__floatdidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50); try 
test__floatdidf(0x0007FB72EB000000, 0x1.FEDCBACp+50); @@ -505,7 +505,7 @@ test "floatuntidf" { fn test__floatsitf(a: i32, expected: u128) !void { const r = __floatsitf(a); - try std.testing.expect(@bitCast(u128, r) == expected); + try std.testing.expect(@as(u128, @bitCast(r)) == expected); } test "floatsitf" { @@ -513,16 +513,16 @@ test "floatsitf" { try test__floatsitf(0x7FFFFFFF, 0x401dfffffffc00000000000000000000); try test__floatsitf(0x12345678, 0x401b2345678000000000000000000000); try test__floatsitf(-0x12345678, 0xc01b2345678000000000000000000000); - try test__floatsitf(@bitCast(i32, @intCast(u32, 0xffffffff)), 0xbfff0000000000000000000000000000); - try test__floatsitf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc01e0000000000000000000000000000); + try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0xffffffff)))), 0xbfff0000000000000000000000000000); + try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc01e0000000000000000000000000000); } fn test__floatunsitf(a: u32, expected_hi: u64, expected_lo: u64) !void { const x = __floatunsitf(a); - const x_repr = @bitCast(u128, x); - const x_hi = @intCast(u64, x_repr >> 64); - const x_lo = @truncate(u64, x_repr); + const x_repr = @as(u128, @bitCast(x)); + const x_hi = @as(u64, @intCast(x_repr >> 64)); + const x_lo = @as(u64, @truncate(x_repr)); if (x_hi == expected_hi and x_lo == expected_lo) { return; @@ -552,9 +552,9 @@ fn test__floatditf(a: i64, expected: f128) !void { fn test__floatunditf(a: u64, expected_hi: u64, expected_lo: u64) !void { const x = __floatunditf(a); - const x_repr = @bitCast(u128, x); - const x_hi = @intCast(u64, x_repr >> 64); - const x_lo = @truncate(u64, x_repr); + const x_repr = @as(u128, @bitCast(x)); + const x_hi = @as(u64, @intCast(x_repr >> 64)); + const x_lo = @as(u64, @truncate(x_repr)); if (x_hi == expected_hi and x_lo == expected_lo) { return; @@ -575,10 +575,10 @@ test "floatditf" { try test__floatditf(0x2, make_tf(0x4000000000000000, 0x0)); try 
test__floatditf(0x1, make_tf(0x3fff000000000000, 0x0)); try test__floatditf(0x0, make_tf(0x0, 0x0)); - try test__floatditf(@bitCast(i64, @as(u64, 0xffffffffffffffff)), make_tf(0xbfff000000000000, 0x0)); - try test__floatditf(@bitCast(i64, @as(u64, 0xfffffffffffffffe)), make_tf(0xc000000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0xffffffffffffffff))), make_tf(0xbfff000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0xfffffffffffffffe))), make_tf(0xc000000000000000, 0x0)); try test__floatditf(-0x123456789abcdef1, make_tf(0xc03b23456789abcd, 0xef10000000000000)); - try test__floatditf(@bitCast(i64, @as(u64, 0x8000000000000000)), make_tf(0xc03e000000000000, 0x0)); + try test__floatditf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), make_tf(0xc03e000000000000, 0x0)); } test "floatunditf" { @@ -773,7 +773,7 @@ fn make_ti(high: u64, low: u64) i128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(i128, result); + return @as(i128, @bitCast(result)); } fn make_uti(high: u64, low: u64) u128 { @@ -787,7 +787,7 @@ fn make_tf(high: u64, low: u64) f128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(f128, result); + return @as(f128, @bitCast(result)); } test "conversion to f16" { @@ -815,22 +815,22 @@ test "conversion to f80" { const floatFromInt = @import("./float_from_int.zig").floatFromInt; try testing.expect(floatFromInt(f80, @as(i80, -12)) == -12); - try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0); - try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); + try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0))) == math.maxInt(u64) + 0); + try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1))) == math.maxInt(u64) + 1); try testing.expect(floatFromInt(f80, @as(u32, 0)) == 0.0); 
try testing.expect(floatFromInt(f80, @as(u32, 1)) == 1.0); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24)); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64)); - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up - - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up - try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0))) == math.maxInt(u24)); + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0))) == math.maxInt(u64)); + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1))) == math.maxInt(u64) + 1); // Exact + try 
testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2))) == math.maxInt(u64) + 1); // Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3))) == math.maxInt(u64) + 3); // Tie - Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4))) == math.maxInt(u64) + 5); // Rounds up + + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0))) == math.maxInt(u65) + 1); // Rounds up + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1))) == math.maxInt(u65) + 1); // Exact + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2))) == math.maxInt(u65) + 1); // Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3))) == math.maxInt(u65) + 1); // Tie - Rounds down + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4))) == math.maxInt(u65) + 5); // Rounds up + try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5))) == math.maxInt(u65) + 5); // Exact } diff --git a/lib/compiler_rt/floattidf.zig b/lib/compiler_rt/floattidf.zig index c42e8f29744d..fa213d4d8043 100644 --- a/lib/compiler_rt/floattidf.zig +++ b/lib/compiler_rt/floattidf.zig @@ -17,5 +17,5 @@ pub fn __floattidf(a: i128) callconv(.C) f64 { } fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { - return floatFromInt(f64, @bitCast(i128, a)); + return floatFromInt(f64, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig index 90003660ecf1..752e5b86639b 100644 --- a/lib/compiler_rt/floattihf.zig +++ b/lib/compiler_rt/floattihf.zig @@ -17,5 +17,5 @@ pub fn __floattihf(a: i128) callconv(.C) f16 { } fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { - return 
floatFromInt(f16, @bitCast(i128, a)); + return floatFromInt(f16, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig index 09c0b12ed082..0f81bfbb85f3 100644 --- a/lib/compiler_rt/floattisf.zig +++ b/lib/compiler_rt/floattisf.zig @@ -17,5 +17,5 @@ pub fn __floattisf(a: i128) callconv(.C) f32 { } fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { - return floatFromInt(f32, @bitCast(i128, a)); + return floatFromInt(f32, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig index ae0ecbb98aed..49397d34a3b9 100644 --- a/lib/compiler_rt/floattitf.zig +++ b/lib/compiler_rt/floattitf.zig @@ -19,5 +19,5 @@ pub fn __floattitf(a: i128) callconv(.C) f128 { } fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { - return floatFromInt(f128, @bitCast(i128, a)); + return floatFromInt(f128, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig index 9c2339ff8a40..a8fd2d6ae67e 100644 --- a/lib/compiler_rt/floattixf.zig +++ b/lib/compiler_rt/floattixf.zig @@ -17,5 +17,5 @@ pub fn __floattixf(a: i128) callconv(.C) f80 { } fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { - return floatFromInt(f80, @bitCast(i128, a)); + return floatFromInt(f80, @as(i128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig index a2b46506f045..f036ffd7fe6a 100644 --- a/lib/compiler_rt/floatuntidf.zig +++ b/lib/compiler_rt/floatuntidf.zig @@ -17,5 +17,5 @@ pub fn __floatuntidf(a: u128) callconv(.C) f64 { } fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 { - return floatFromInt(f64, @bitCast(u128, a)); + return floatFromInt(f64, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig index f493453c91d2..97ccf7f5fe52 100644 --- a/lib/compiler_rt/floatuntihf.zig +++ b/lib/compiler_rt/floatuntihf.zig 
@@ -17,5 +17,5 @@ pub fn __floatuntihf(a: u128) callconv(.C) f16 { } fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 { - return floatFromInt(f16, @bitCast(u128, a)); + return floatFromInt(f16, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig index 9df7b833eafa..8d8f771e7d0d 100644 --- a/lib/compiler_rt/floatuntisf.zig +++ b/lib/compiler_rt/floatuntisf.zig @@ -17,5 +17,5 @@ pub fn __floatuntisf(a: u128) callconv(.C) f32 { } fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 { - return floatFromInt(f32, @bitCast(u128, a)); + return floatFromInt(f32, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig index 55a5ab4da111..e828f12d8b25 100644 --- a/lib/compiler_rt/floatuntitf.zig +++ b/lib/compiler_rt/floatuntitf.zig @@ -19,5 +19,5 @@ pub fn __floatuntitf(a: u128) callconv(.C) f128 { } fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 { - return floatFromInt(f128, @bitCast(u128, a)); + return floatFromInt(f128, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig index cbf597ca8946..c9016bfa0664 100644 --- a/lib/compiler_rt/floatuntixf.zig +++ b/lib/compiler_rt/floatuntixf.zig @@ -17,5 +17,5 @@ pub fn __floatuntixf(a: u128) callconv(.C) f80 { } fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 { - return floatFromInt(f80, @bitCast(u128, a)); + return floatFromInt(f80, @as(u128, @bitCast(a))); } diff --git a/lib/compiler_rt/floor.zig b/lib/compiler_rt/floor.zig index ea274c0d8295..dd73be86fd8e 100644 --- a/lib/compiler_rt/floor.zig +++ b/lib/compiler_rt/floor.zig @@ -26,8 +26,8 @@ comptime { } pub fn __floorh(x: f16) callconv(.C) f16 { - var u = @bitCast(u16, x); - const e = @intCast(i16, (u >> 10) & 31) - 15; + var u = @as(u16, @bitCast(x)); + const e = @as(i16, @intCast((u >> 10) & 31)) - 15; var m: u16 = undefined; // TODO: 
Shouldn't need this explicit check. @@ -40,7 +40,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 { } if (e >= 0) { - m = @as(u16, 1023) >> @intCast(u4, e); + m = @as(u16, 1023) >> @as(u4, @intCast(e)); if (u & m == 0) { return x; } @@ -48,7 +48,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 { if (u >> 15 != 0) { u += m; } - return @bitCast(f16, u & ~m); + return @as(f16, @bitCast(u & ~m)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 15 == 0) { @@ -60,8 +60,8 @@ pub fn __floorh(x: f16) callconv(.C) f16 { } pub fn floorf(x: f32) callconv(.C) f32 { - var u = @bitCast(u32, x); - const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + var u = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; var m: u32 = undefined; // TODO: Shouldn't need this explicit check. @@ -74,7 +74,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { } if (e >= 0) { - m = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } @@ -82,7 +82,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { if (u >> 31 != 0) { u += m; } - return @bitCast(f32, u & ~m); + return @as(f32, @bitCast(u & ~m)); } else { math.doNotOptimizeAway(x + 0x1.0p120); if (u >> 31 == 0) { @@ -96,7 +96,7 @@ pub fn floorf(x: f32) callconv(.C) f32 { pub fn floor(x: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -126,13 +126,13 @@ pub fn floor(x: f64) callconv(.C) f64 { pub fn __floorx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, floorq(x)); + return @as(f80, @floatCast(floorq(x))); } pub fn floorq(x: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); - const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; diff --git a/lib/compiler_rt/fma.zig b/lib/compiler_rt/fma.zig index 
fe2da1c99c68..ed0e6649c622 100644 --- a/lib/compiler_rt/fma.zig +++ b/lib/compiler_rt/fma.zig @@ -28,20 +28,20 @@ comptime { pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, fmaf(x, y, z)); + return @as(f16, @floatCast(fmaf(x, y, z))); } pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 { const xy = @as(f64, x) * y; const xy_z = xy + z; - const u = @bitCast(u64, xy_z); + const u = @as(u64, @bitCast(xy_z)); const e = (u >> 52) & 0x7FF; if ((u & 0x1FFFFFFF) != 0x10000000 or e == 0x7FF or (xy_z - xy == z and xy_z - z == xy)) { - return @floatCast(f32, xy_z); + return @as(f32, @floatCast(xy_z)); } else { // TODO: Handle inexact case with double-rounding - return @floatCast(f32, xy_z); + return @as(f32, @floatCast(xy_z)); } } @@ -95,7 +95,7 @@ pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 { pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, fmaq(a, b, c)); + return @as(f80, @floatCast(fmaq(a, b, c))); } /// Fused multiply-add: Compute x * y + z with a single rounding error. 
@@ -201,12 +201,12 @@ fn dd_mul(a: f64, b: f64) dd { fn add_adjusted(a: f64, b: f64) f64 { var sum = dd_add(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u64, sum.hi); + var uhii = @as(u64, @bitCast(sum.hi)); if (uhii & 1 == 0) { // hibits += copysign(1.0, sum.hi, sum.lo) - const uloi = @bitCast(u64, sum.lo); + const uloi = @as(u64, @bitCast(sum.lo)); uhii += 1 - ((uhii ^ uloi) >> 62); - sum.hi = @bitCast(f64, uhii); + sum.hi = @as(f64, @bitCast(uhii)); } } return sum.hi; @@ -215,12 +215,12 @@ fn add_adjusted(a: f64, b: f64) f64 { fn add_and_denorm(a: f64, b: f64, scale: i32) f64 { var sum = dd_add(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u64, sum.hi); - const bits_lost = -@intCast(i32, (uhii >> 52) & 0x7FF) - scale + 1; + var uhii = @as(u64, @bitCast(sum.hi)); + const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1; if ((bits_lost != 1) == (uhii & 1 != 0)) { - const uloi = @bitCast(u64, sum.lo); + const uloi = @as(u64, @bitCast(sum.lo)); uhii += 1 - (((uhii ^ uloi) >> 62) & 2); - sum.hi = @bitCast(f64, uhii); + sum.hi = @as(f64, @bitCast(uhii)); } } return math.scalbn(sum.hi, scale); @@ -257,12 +257,12 @@ fn dd_add128(a: f128, b: f128) dd128 { fn add_adjusted128(a: f128, b: f128) f128 { var sum = dd_add128(a, b); if (sum.lo != 0) { - var uhii = @bitCast(u128, sum.hi); + var uhii = @as(u128, @bitCast(sum.hi)); if (uhii & 1 == 0) { // hibits += copysign(1.0, sum.hi, sum.lo) - const uloi = @bitCast(u128, sum.lo); + const uloi = @as(u128, @bitCast(sum.lo)); uhii += 1 - ((uhii ^ uloi) >> 126); - sum.hi = @bitCast(f128, uhii); + sum.hi = @as(f128, @bitCast(uhii)); } } return sum.hi; @@ -282,12 +282,12 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 { // If we are losing only one bit to denormalization, however, we must // break the ties manually. 
if (sum.lo != 0) { - var uhii = @bitCast(u128, sum.hi); - const bits_lost = -@intCast(i32, (uhii >> 112) & 0x7FFF) - scale + 1; + var uhii = @as(u128, @bitCast(sum.hi)); + const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1; if ((bits_lost != 1) == (uhii & 1 != 0)) { - const uloi = @bitCast(u128, sum.lo); + const uloi = @as(u128, @bitCast(sum.lo)); uhii += 1 - (((uhii ^ uloi) >> 126) & 2); - sum.hi = @bitCast(f128, uhii); + sum.hi = @as(f128, @bitCast(uhii)); } } return math.scalbn(sum.hi, scale); diff --git a/lib/compiler_rt/fmod.zig b/lib/compiler_rt/fmod.zig index b80dffdb820b..81706b71e176 100644 --- a/lib/compiler_rt/fmod.zig +++ b/lib/compiler_rt/fmod.zig @@ -22,7 +22,7 @@ comptime { pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, fmodf(x, y)); + return @as(f16, @floatCast(fmodf(x, y))); } pub fn fmodf(x: f32, y: f32) callconv(.C) f32 { @@ -46,12 +46,12 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { const signBit = (@as(Z, 1) << (significandBits + exponentBits)); const maxExponent = ((1 << exponentBits) - 1); - var aRep = @bitCast(Z, a); - var bRep = @bitCast(Z, b); + var aRep = @as(Z, @bitCast(a)); + var bRep = @as(Z, @bitCast(b)); const signA = aRep & signBit; - var expA = @intCast(i32, (@bitCast(Z, a) >> significandBits) & maxExponent); - var expB = @intCast(i32, (@bitCast(Z, b) >> significandBits) & maxExponent); + var expA = @as(i32, @intCast((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + var expB = @as(i32, @intCast((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); // There are 3 cases where the answer is undefined, check for: // - fmodx(val, 0) @@ -82,8 +82,8 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { var highA: u64 = 0; var highB: u64 = 0; - var lowA: u64 = @truncate(u64, aRep); - var lowB: u64 = @truncate(u64, bRep); + var lowA: u64 = @as(u64, @truncate(aRep)); + var lowB: u64 = @as(u64, @truncate(bRep)); while (expA > expB) : 
(expA -= 1) { var high = highA -% highB; @@ -123,11 +123,11 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { // Combine the exponent with the sign and significand, normalize if happened to be denormalized if (expA < -fractionalBits) { - return @bitCast(T, signA); + return @as(T, @bitCast(signA)); } else if (expA <= 0) { - return @bitCast(T, (lowA >> @intCast(math.Log2Int(u64), 1 - expA)) | signA); + return @as(T, @bitCast((lowA >> @as(math.Log2Int(u64), @intCast(1 - expA))) | signA)); } else { - return @bitCast(T, lowA | (@as(Z, @intCast(u16, expA)) << significandBits) | signA); + return @as(T, @bitCast(lowA | (@as(Z, @as(u16, @intCast(expA))) << significandBits) | signA)); } } @@ -136,10 +136,10 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 { pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { var amod = a; var bmod = b; - const aPtr_u64 = @ptrCast([*]u64, &amod); - const bPtr_u64 = @ptrCast([*]u64, &bmod); - const aPtr_u16 = @ptrCast([*]u16, &amod); - const bPtr_u16 = @ptrCast([*]u16, &bmod); + const aPtr_u64 = @as([*]u64, @ptrCast(&amod)); + const bPtr_u64 = @as([*]u64, @ptrCast(&bmod)); + const aPtr_u16 = @as([*]u16, @ptrCast(&amod)); + const bPtr_u16 = @as([*]u16, @ptrCast(&bmod)); const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) { .Little => 7, @@ -155,8 +155,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { }; const signA = aPtr_u16[exp_and_sign_index] & 0x8000; - var expA = @intCast(i32, (aPtr_u16[exp_and_sign_index] & 0x7fff)); - var expB = @intCast(i32, (bPtr_u16[exp_and_sign_index] & 0x7fff)); + var expA = @as(i32, @intCast((aPtr_u16[exp_and_sign_index] & 0x7fff))); + var expB = @as(i32, @intCast((bPtr_u16[exp_and_sign_index] & 0x7fff))); // There are 3 cases where the answer is undefined, check for: // - fmodq(val, 0) @@ -173,8 +173,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { } // Remove the sign from both - aPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expA)); - 
bPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expB)); + aPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expA)))); + bPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expB)))); if (amod <= bmod) { if (amod == bmod) { return 0 * a; @@ -241,10 +241,10 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 { // Combine the exponent with the sign, normalize if happend to be denormalized if (expA <= 0) { - aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, (expA +% 120))) | signA; + aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast((expA +% 120))))) | signA; amod *= 0x1p-120; } else { - aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, expA)) | signA; + aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast(expA)))) | signA; } return amod; @@ -270,14 +270,14 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { const exp_bits = if (T == f32) 9 else 12; const bits_minus_1 = bits - 1; const mask = if (T == f32) 0xff else 0x7ff; - var ux = @bitCast(uint, x); - var uy = @bitCast(uint, y); - var ex = @intCast(i32, (ux >> digits) & mask); - var ey = @intCast(i32, (uy >> digits) & mask); - const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1); + var ux = @as(uint, @bitCast(x)); + var uy = @as(uint, @bitCast(y)); + var ex = @as(i32, @intCast((ux >> digits) & mask)); + var ey = @as(i32, @intCast((uy >> digits) & mask)); + const sx = if (T == f32) @as(u32, @intCast(ux & 0x80000000)) else @as(i32, @intCast(ux >> bits_minus_1)); var i: uint = undefined; - if (uy << 1 == 0 or math.isNan(@bitCast(T, uy)) or ex == mask) + if (uy << 1 == 0 or math.isNan(@as(T, @bitCast(uy))) or ex == mask) return (x * y) / (x * y); if (ux << 1 <= uy << 1) { @@ -293,7 +293,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { ex -= 1; i <<= 1; }) {} - ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1)); + ux <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 
1)))); } else { ux &= math.maxInt(uint) >> exp_bits; ux |= 1 << digits; @@ -304,7 +304,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { ey -= 1; i <<= 1; }) {} - uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1)); + uy <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ey + 1)))); } else { uy &= math.maxInt(uint) >> exp_bits; uy |= 1 << digits; @@ -334,16 +334,16 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T { // scale result up if (ex > 0) { ux -%= 1 << digits; - ux |= @as(uint, @bitCast(u32, ex)) << digits; + ux |= @as(uint, @as(u32, @bitCast(ex))) << digits; } else { - ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1)); + ux >>= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 1)))); } if (T == f32) { ux |= sx; } else { - ux |= @intCast(uint, sx) << bits_minus_1; + ux |= @as(uint, @intCast(sx)) << bits_minus_1; } - return @bitCast(T, ux); + return @as(T, @bitCast(ux)); } test "fmodf" { diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig index 47ff9e4c0c1f..d61233e7cf51 100644 --- a/lib/compiler_rt/int.zig +++ b/lib/compiler_rt/int.zig @@ -52,8 +52,8 @@ test "test_divmodti4" { [_]i128{ -7, 5, -1, -2 }, [_]i128{ 19, 5, 3, 4 }, [_]i128{ 19, -5, -3, 4 }, - [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000000)), 0 }, - [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000007)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000001)), -1 }, + [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000000))), 0 }, + [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000007))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000001))), -1 }, }; for (cases) |case| { @@ -85,8 +85,8 @@ test "test_divmoddi4" { [_]i64{ -7, 5, -1, -2 }, [_]i64{ 19, 5, 3, 4 }, [_]i64{ 19, -5, -3, 4 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 8, 
@bitCast(i64, @as(u64, 0xf000000000000000)), 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000007)), 8, @bitCast(i64, @as(u64, 0xf000000000000001)), -1 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000000))), 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000007))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000001))), -1 }, }; for (cases) |case| { @@ -110,14 +110,14 @@ test "test_udivmoddi4" { pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 { // Set aside the sign of the quotient. - const sign = @bitCast(u64, (a ^ b) >> 63); + const sign = @as(u64, @bitCast((a ^ b) >> 63)); // Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63). const abs_a = (a ^ (a >> 63)) -% (a >> 63); const abs_b = (b ^ (b >> 63)) -% (b >> 63); // Unsigned division - const res = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), null); + const res = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), null); // Apply sign of quotient to result and return. 
- return @bitCast(i64, (res ^ sign) -% sign); + return @as(i64, @bitCast((res ^ sign) -% sign)); } test "test_divdi3" { @@ -129,10 +129,10 @@ test "test_divdi3" { [_]i64{ -2, 1, -2 }, [_]i64{ -2, -1, 2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)) }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)) }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0x4000000000000000 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0xC000000000000000)) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0x4000000000000000 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0xC000000000000000))) }, }; for (cases) |case| { @@ -151,9 +151,9 @@ pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 { const abs_b = (b ^ (b >> 63)) -% (b >> 63); // Unsigned division var r: u64 = undefined; - _ = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), &r); + _ = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), &r); // Apply the sign of the dividend and return. 
- return (@bitCast(i64, r) ^ (a >> 63)) -% (a >> 63); + return (@as(i64, @bitCast(r)) ^ (a >> 63)) -% (a >> 63); } test "test_moddi3" { @@ -165,12 +165,12 @@ test "test_moddi3" { [_]i64{ -5, 3, -2 }, [_]i64{ -5, -3, -2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 3, -2 }, - [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -3, -2 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 3, -2 }, + [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -3, -2 }, }; for (cases) |case| { @@ -225,8 +225,8 @@ test "test_divmodsi4" { [_]i32{ 19, 5, 3, 4 }, [_]i32{ 19, -5, -3, 4 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 8, @bitCast(i32, @as(u32, 0xf0000000)), 0 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000007)), 8, @bitCast(i32, @as(u32, 0xf0000001)), -1 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 8, @as(i32, @bitCast(@as(u32, 0xf0000000))), 0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000007))), 8, @as(i32, @bitCast(@as(u32, 0xf0000001))), -1 }, }; for (cases) |case| { @@ -242,7 +242,7 @@ fn test_one_divmodsi4(a: i32, b: i32, expected_q: i32, expected_r: i32) !void { pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 { const d = __udivsi3(a, b); - rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b))); + rem.* = @as(u32, @bitCast(@as(i32, @bitCast(a)) -% (@as(i32, @bitCast(d)) * @as(i32, @bitCast(b))))); return d; } @@ -256,14 +256,14 @@ fn __aeabi_idiv(n: i32, d: i32) 
callconv(.AAPCS) i32 { inline fn div_i32(n: i32, d: i32) i32 { // Set aside the sign of the quotient. - const sign = @bitCast(u32, (n ^ d) >> 31); + const sign = @as(u32, @bitCast((n ^ d) >> 31)); // Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31). const abs_n = (n ^ (n >> 31)) -% (n >> 31); const abs_d = (d ^ (d >> 31)) -% (d >> 31); // abs(a) / abs(b) - const res = @bitCast(u32, abs_n) / @bitCast(u32, abs_d); + const res = @as(u32, @bitCast(abs_n)) / @as(u32, @bitCast(abs_d)); // Apply sign of quotient to result and return. - return @bitCast(i32, (res ^ sign) -% sign); + return @as(i32, @bitCast((res ^ sign) -% sign)); } test "test_divsi3" { @@ -275,10 +275,10 @@ test "test_divsi3" { [_]i32{ -2, 1, -2 }, [_]i32{ -2, -1, 2 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)) }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)) }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -2, 0x40000000 }, - [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0xC0000000)) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))) }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -2, 0x40000000 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0xC0000000))) }, }; for (cases) |case| { @@ -304,7 +304,7 @@ inline fn div_u32(n: u32, d: u32) u32 { // special cases if (d == 0) return 0; // ?! 
if (n == 0) return 0; - var sr = @bitCast(c_uint, @as(c_int, @clz(d)) - @as(c_int, @clz(n))); + var sr = @as(c_uint, @bitCast(@as(c_int, @clz(d)) - @as(c_int, @clz(n)))); // 0 <= sr <= n_uword_bits - 1 or sr large if (sr > n_uword_bits - 1) { // d > r @@ -317,12 +317,12 @@ inline fn div_u32(n: u32, d: u32) u32 { sr += 1; // 1 <= sr <= n_uword_bits - 1 // Not a special case - var q: u32 = n << @intCast(u5, n_uword_bits - sr); - var r: u32 = n >> @intCast(u5, sr); + var q: u32 = n << @as(u5, @intCast(n_uword_bits - sr)); + var r: u32 = n >> @as(u5, @intCast(sr)); var carry: u32 = 0; while (sr > 0) : (sr -= 1) { // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> @intCast(u5, n_uword_bits - 1)); + r = (r << 1) | (q >> @as(u5, @intCast(n_uword_bits - 1))); q = (q << 1) | carry; // carry = 0; // if (r.all >= d.all) @@ -330,9 +330,9 @@ inline fn div_u32(n: u32, d: u32) u32 { // r.all -= d.all; // carry = 1; // } - const s = @bitCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1); - carry = @intCast(u32, s & 1); - r -= d & @bitCast(u32, s); + const s = @as(i32, @bitCast(d -% r -% 1)) >> @as(u5, @intCast(n_uword_bits - 1)); + carry = @as(u32, @intCast(s & 1)); + r -= d & @as(u32, @bitCast(s)); } q = (q << 1) | carry; return q; @@ -496,11 +496,11 @@ test "test_modsi3" { [_]i32{ 5, -3, 2 }, [_]i32{ -5, 3, -2 }, [_]i32{ -5, -3, -2 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 1, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 2, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -2, 0x0 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 3, -2 }, - [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -3, -2 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 1, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 2, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), -2, 0x0 }, + [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 3, -2 }, + [_]i32{ @as(i32, @bitCast(@as(u32, 
@intCast(0x80000000)))), -3, -2 }, }; for (cases) |case| { diff --git a/lib/compiler_rt/int_from_float.zig b/lib/compiler_rt/int_from_float.zig index 78397a813108..aa2f78f922c0 100644 --- a/lib/compiler_rt/int_from_float.zig +++ b/lib/compiler_rt/int_from_float.zig @@ -17,9 +17,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { const sig_mask = (@as(rep_t, 1) << sig_bits) - 1; // Break a into sign, exponent, significand - const a_rep: rep_t = @bitCast(rep_t, a); + const a_rep: rep_t = @as(rep_t, @bitCast(a)); const negative = (a_rep >> (float_bits - 1)) != 0; - const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias; + const exponent = @as(i32, @intCast((a_rep << 1) >> (sig_bits + 1))) - exp_bias; const significand: rep_t = (a_rep & sig_mask) | implicit_bit; // If the exponent is negative, the result rounds to zero. @@ -29,9 +29,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { switch (@typeInfo(I).Int.signedness) { .unsigned => { if (negative) return 0; - if (@intCast(c_uint, exponent) >= @min(int_bits, max_exp)) return math.maxInt(I); + if (@as(c_uint, @intCast(exponent)) >= @min(int_bits, max_exp)) return math.maxInt(I); }, - .signed => if (@intCast(c_uint, exponent) >= @min(int_bits - 1, max_exp)) { + .signed => if (@as(c_uint, @intCast(exponent)) >= @min(int_bits - 1, max_exp)) { return if (negative) math.minInt(I) else math.maxInt(I); }, } @@ -40,9 +40,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I { // Otherwise, shift left. 
var result: I = undefined; if (exponent < fractional_bits) { - result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent)); + result = @as(I, @intCast(significand >> @as(Log2Int(rep_t), @intCast(fractional_bits - exponent)))); } else { - result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits); + result = @as(I, @intCast(significand)) << @as(Log2Int(I), @intCast(exponent - fractional_bits)); } if ((@typeInfo(I).Int.signedness == .signed) and negative) diff --git a/lib/compiler_rt/log.zig b/lib/compiler_rt/log.zig index 622d509a2f86..9c4b0096aa20 100644 --- a/lib/compiler_rt/log.zig +++ b/lib/compiler_rt/log.zig @@ -27,7 +27,7 @@ comptime { pub fn __logh(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, logf(a)); + return @as(f16, @floatCast(logf(a))); } pub fn logf(x_: f32) callconv(.C) f32 { @@ -39,7 +39,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); var k: i32 = 0; // x < 2^(-126) @@ -56,7 +56,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { // subnormal, scale x k -= 25; x *= 0x1.0p25; - ix = @bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -65,9 +65,9 @@ pub fn logf(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -77,7 +77,7 @@ pub fn logf(x_: f32) callconv(.C) f32 { const t2 = z * (Lg1 + w * Lg3); const R = t2 + t1; const hfsq = 0.5 * f * f; - const dk = @floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi; } @@ -94,8 +94,8 @@ pub fn log(x_: f64) 
callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -111,7 +111,7 @@ pub fn log(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, ix) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(ix)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -120,10 +120,10 @@ pub fn log(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -133,19 +133,19 @@ pub fn log(x_: f64) callconv(.C) f64 { const t1 = w * (Lg2 + w * (Lg4 + w * Lg6)); const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7))); const R = t2 + t1; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi; } pub fn __logx(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, logq(a)); + return @as(f80, @floatCast(logq(a))); } pub fn logq(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log(@floatCast(f64, a)); + return log(@as(f64, @floatCast(a))); } pub fn logl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/log10.zig b/lib/compiler_rt/log10.zig index d45a3d8a40cd..bbd6392d96a8 100644 --- a/lib/compiler_rt/log10.zig +++ b/lib/compiler_rt/log10.zig @@ -28,7 +28,7 @@ comptime { pub fn __log10h(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, log10f(a)); + return @as(f16, 
@floatCast(log10f(a))); } pub fn log10f(x_: f32) callconv(.C) f32 { @@ -42,7 +42,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 0; @@ -59,7 +59,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 { k -= 25; x *= 0x1.0p25; - ix = @bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -68,9 +68,9 @@ pub fn log10f(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -82,11 +82,11 @@ pub fn log10f(x_: f32) callconv(.C) f32 { const hfsq = 0.5 * f * f; var hi = f - hfsq; - u = @bitCast(u32, hi); + u = @as(u32, @bitCast(hi)); u &= 0xFFFFF000; - hi = @bitCast(f32, u); + hi = @as(f32, @bitCast(u)); const lo = f - hi - hfsq + s * (hfsq + R); - const dk = @floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi; } @@ -105,8 +105,8 @@ pub fn log10(x_: f64) callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -122,7 +122,7 @@ pub fn log10(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -131,10 +131,10 @@ pub fn log10(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 
0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -147,14 +147,14 @@ pub fn log10(x_: f64) callconv(.C) f64 { // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f) var hi = f - hfsq; - var hii = @bitCast(u64, hi); + var hii = @as(u64, @bitCast(hi)); hii &= @as(u64, maxInt(u64)) << 32; - hi = @bitCast(f64, hii); + hi = @as(f64, @bitCast(hii)); const lo = f - hi - hfsq + s * (hfsq + R); // val_hi + val_lo ~ log10(1 + f) + k * log10(2) var val_hi = hi * ivln10hi; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); const y = dk * log10_2hi; var val_lo = dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi; @@ -168,12 +168,12 @@ pub fn log10(x_: f64) callconv(.C) f64 { pub fn __log10x(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, log10q(a)); + return @as(f80, @floatCast(log10q(a))); } pub fn log10q(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log10(@floatCast(f64, a)); + return log10(@as(f64, @floatCast(a))); } pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/log2.zig b/lib/compiler_rt/log2.zig index 29595d07d9aa..f3d80879d0c9 100644 --- a/lib/compiler_rt/log2.zig +++ b/lib/compiler_rt/log2.zig @@ -28,7 +28,7 @@ comptime { pub fn __log2h(a: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, log2f(a)); + return @as(f16, @floatCast(log2f(a))); } pub fn log2f(x_: f32) callconv(.C) f32 { @@ -40,7 +40,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 { const Lg4: f32 = 0xf89e26.0p-26; var x = x_; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 0; @@ -57,7 +57,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 { k -= 25; x *= 0x1.0p25; - ix = 
@bitCast(u32, x); + ix = @as(u32, @bitCast(x)); } else if (ix >= 0x7F800000) { return x; } else if (ix == 0x3F800000) { @@ -66,9 +66,9 @@ pub fn log2f(x_: f32) callconv(.C) f32 { // x into [sqrt(2) / 2, sqrt(2)] ix += 0x3F800000 - 0x3F3504F3; - k += @intCast(i32, ix >> 23) - 0x7F; + k += @as(i32, @intCast(ix >> 23)) - 0x7F; ix = (ix & 0x007FFFFF) + 0x3F3504F3; - x = @bitCast(f32, ix); + x = @as(f32, @bitCast(ix)); const f = x - 1.0; const s = f / (2.0 + f); @@ -80,11 +80,11 @@ pub fn log2f(x_: f32) callconv(.C) f32 { const hfsq = 0.5 * f * f; var hi = f - hfsq; - u = @bitCast(u32, hi); + u = @as(u32, @bitCast(hi)); u &= 0xFFFFF000; - hi = @bitCast(f32, u); + hi = @as(f32, @bitCast(u)); const lo = f - hi - hfsq + s * (hfsq + R); - return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @floatFromInt(f32, k); + return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @as(f32, @floatFromInt(k)); } pub fn log2(x_: f64) callconv(.C) f64 { @@ -99,8 +99,8 @@ pub fn log2(x_: f64) callconv(.C) f64 { const Lg7: f64 = 1.479819860511658591e-01; var x = x_; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 0; if (hx < 0x00100000 or hx >> 31 != 0) { @@ -116,7 +116,7 @@ pub fn log2(x_: f64) callconv(.C) f64 { // subnormal, scale x k -= 54; x *= 0x1.0p54; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); } else if (hx >= 0x7FF00000) { return x; } else if (hx == 0x3FF00000 and ix << 32 == 0) { @@ -125,10 +125,10 @@ pub fn log2(x_: f64) callconv(.C) f64 { // x into [sqrt(2) / 2, sqrt(2)] hx += 0x3FF00000 - 0x3FE6A09E; - k += @intCast(i32, hx >> 20) - 0x3FF; + k += @as(i32, @intCast(hx >> 20)) - 0x3FF; hx = (hx & 0x000FFFFF) + 0x3FE6A09E; ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF); - x = @bitCast(f64, ix); + x = @as(f64, @bitCast(ix)); const f = x - 1.0; const hfsq = 0.5 * f * f; @@ -141,16 +141,16 @@ pub fn log2(x_: f64) 
callconv(.C) f64 { // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f) var hi = f - hfsq; - var hii = @bitCast(u64, hi); + var hii = @as(u64, @bitCast(hi)); hii &= @as(u64, maxInt(u64)) << 32; - hi = @bitCast(f64, hii); + hi = @as(f64, @bitCast(hii)); const lo = f - hi - hfsq + s * (hfsq + R); var val_hi = hi * ivln2hi; var val_lo = (lo + hi) * ivln2lo + lo * ivln2hi; // spadd(val_hi, val_lo, y) - const y = @floatFromInt(f64, k); + const y = @as(f64, @floatFromInt(k)); const ww = y + val_hi; val_lo += (y - ww) + val_hi; val_hi = ww; @@ -160,12 +160,12 @@ pub fn log2(x_: f64) callconv(.C) f64 { pub fn __log2x(a: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, log2q(a)); + return @as(f80, @floatCast(log2q(a))); } pub fn log2q(a: f128) callconv(.C) f128 { // TODO: more correct implementation - return log2(@floatCast(f64, a)); + return log2(@as(f64, @floatCast(a))); } pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig index ef02a697bc81..97b005481b68 100644 --- a/lib/compiler_rt/modti3.zig +++ b/lib/compiler_rt/modti3.zig @@ -24,7 +24,7 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { - return @bitCast(v2u64, mod(@bitCast(i128, a), @bitCast(i128, b))); + return @as(v2u64, @bitCast(mod(@as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } inline fn mod(a: i128, b: i128) i128 { @@ -35,8 +35,8 @@ inline fn mod(a: i128, b: i128) i128 { const bn = (b ^ s_b) -% s_b; // negate if s == -1 var r: u128 = undefined; - _ = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), &r); - return (@bitCast(i128, r) ^ s_a) -% s_a; // negate if s == -1 + _ = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), &r); + return (@as(i128, @bitCast(r)) ^ s_a) -% s_a; // negate if s == -1 } test { diff --git a/lib/compiler_rt/modti3_test.zig 
b/lib/compiler_rt/modti3_test.zig index c7cee57f8bea..cad78f68bfa9 100644 --- a/lib/compiler_rt/modti3_test.zig +++ b/lib/compiler_rt/modti3_test.zig @@ -33,5 +33,5 @@ fn make_ti(high: u64, low: u64) i128 { var result: u128 = high; result <<= 64; result |= low; - return @bitCast(i128, result); + return @as(i128, @bitCast(result)); } diff --git a/lib/compiler_rt/mulXi3.zig b/lib/compiler_rt/mulXi3.zig index 3999681034c6..be3a444ce6e6 100644 --- a/lib/compiler_rt/mulXi3.zig +++ b/lib/compiler_rt/mulXi3.zig @@ -21,8 +21,8 @@ comptime { } pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 { - var ua = @bitCast(u32, a); - var ub = @bitCast(u32, b); + var ua = @as(u32, @bitCast(a)); + var ub = @as(u32, @bitCast(b)); var r: u32 = 0; while (ua > 0) { @@ -31,7 +31,7 @@ pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 { ub <<= 1; } - return @bitCast(i32, r); + return @as(i32, @bitCast(r)); } pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 { @@ -93,7 +93,7 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 { const v2u64 = @Vector(2, u64); fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { - return @bitCast(v2u64, mulX(i128, @bitCast(i128, a), @bitCast(i128, b))); + return @as(v2u64, @bitCast(mulX(i128, @as(i128, @bitCast(a)), @as(i128, @bitCast(b))))); } test { diff --git a/lib/compiler_rt/mulXi3_test.zig b/lib/compiler_rt/mulXi3_test.zig index 128f428af2f5..3a360098c47b 100644 --- a/lib/compiler_rt/mulXi3_test.zig +++ b/lib/compiler_rt/mulXi3_test.zig @@ -46,14 +46,14 @@ test "mulsi3" { try test_one_mulsi3(-46340, 46340, -2147395600); try test_one_mulsi3(46340, -46340, -2147395600); try test_one_mulsi3(-46340, -46340, 2147395600); - try test_one_mulsi3(4194303, 8192, @truncate(i32, 34359730176)); - try test_one_mulsi3(-4194303, 8192, @truncate(i32, -34359730176)); - try test_one_mulsi3(4194303, -8192, @truncate(i32, -34359730176)); - try test_one_mulsi3(-4194303, -8192, @truncate(i32, 34359730176)); - try test_one_mulsi3(8192, 4194303, @truncate(i32, 
34359730176)); - try test_one_mulsi3(-8192, 4194303, @truncate(i32, -34359730176)); - try test_one_mulsi3(8192, -4194303, @truncate(i32, -34359730176)); - try test_one_mulsi3(-8192, -4194303, @truncate(i32, 34359730176)); + try test_one_mulsi3(4194303, 8192, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(-4194303, 8192, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(4194303, -8192, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(-4194303, -8192, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(8192, 4194303, @as(i32, @truncate(34359730176))); + try test_one_mulsi3(-8192, 4194303, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(8192, -4194303, @as(i32, @truncate(-34359730176))); + try test_one_mulsi3(-8192, -4194303, @as(i32, @truncate(34359730176))); } test "muldi3" { diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig index 9652782a4968..a0320333adb4 100644 --- a/lib/compiler_rt/mulf3.zig +++ b/lib/compiler_rt/mulf3.zig @@ -28,53 +28,53 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { const significandMask = (@as(Z, 1) << significandBits) - 1; const absMask = signBit - 1; - const qnanRep = @bitCast(Z, math.nan(T)) | quietBit; - const infRep = @bitCast(Z, math.inf(T)); - const minNormalRep = @bitCast(Z, math.floatMin(T)); + const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit; + const infRep = @as(Z, @bitCast(math.inf(T))); + const minNormalRep = @as(Z, @bitCast(math.floatMin(T))); const ZExp = if (typeWidth >= 32) u32 else Z; - const aExponent = @truncate(ZExp, (@bitCast(Z, a) >> significandBits) & maxExponent); - const bExponent = @truncate(ZExp, (@bitCast(Z, b) >> significandBits) & maxExponent); - const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit; + const aExponent = @as(ZExp, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent)); + const bExponent = @as(ZExp, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent)); + const productSign: Z = 
(@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit; - var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask); - var bSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, b) & significandMask); + var aSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(a)) & significandMask)); + var bSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(b)) & significandMask)); var scale: i32 = 0; // Detect if a or b is zero, denormal, infinity, or NaN. if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) { - const aAbs: Z = @bitCast(Z, a) & absMask; - const bAbs: Z = @bitCast(Z, b) & absMask; + const aAbs: Z = @as(Z, @bitCast(a)) & absMask; + const bAbs: Z = @as(Z, @bitCast(b)) & absMask; // NaN * anything = qNaN - if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit); + if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit)); // anything * NaN = qNaN - if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit); + if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit)); if (aAbs == infRep) { // infinity * non-zero = +/- infinity if (bAbs != 0) { - return @bitCast(T, aAbs | productSign); + return @as(T, @bitCast(aAbs | productSign)); } else { // infinity * zero = NaN - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } } if (bAbs == infRep) { //? 
non-zero * infinity = +/- infinity if (aAbs != 0) { - return @bitCast(T, bAbs | productSign); + return @as(T, @bitCast(bAbs | productSign)); } else { // zero * infinity = NaN - return @bitCast(T, qnanRep); + return @as(T, @bitCast(qnanRep)); } } // zero * anything = +/- zero - if (aAbs == 0) return @bitCast(T, productSign); + if (aAbs == 0) return @as(T, @bitCast(productSign)); // anything * zero = +/- zero - if (bAbs == 0) return @bitCast(T, productSign); + if (bAbs == 0) return @as(T, @bitCast(productSign)); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to @@ -99,7 +99,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { const left_align_shift = ZSignificandBits - fractionalBits - 1; common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo); - var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale; + var productExponent: i32 = @as(i32, @intCast(aExponent + bExponent)) - exponentBias + scale; // Normalize the significand, adjust exponent if needed. if ((productHi & integerBit) != 0) { @@ -110,7 +110,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { } // If we have overflowed the type, return +/- infinity. - if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign); + if (productExponent >= maxExponent) return @as(T, @bitCast(infRep | productSign)); var result: Z = undefined; if (productExponent <= 0) { @@ -120,8 +120,8 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { // a zero of the appropriate sign. Mathematically there is no need to // handle this case separately, but we make it a special case to // simplify the shift logic. 
- const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent)); - if (shift >= ZSignificandBits) return @bitCast(T, productSign); + const shift: u32 = @as(u32, @truncate(@as(Z, 1) -% @as(u32, @bitCast(productExponent)))); + if (shift >= ZSignificandBits) return @as(T, @bitCast(productSign)); // Otherwise, shift the significand of the result so that the round // bit is the high bit of productLo. @@ -135,7 +135,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { } else { // Result is normal before rounding; insert the exponent. result = productHi & significandMask; - result |= @intCast(Z, productExponent) << significandBits; + result |= @as(Z, @intCast(productExponent)) << significandBits; } // Final rounding. The final result may overflow to infinity, or underflow @@ -156,7 +156,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T { // Insert the sign of the result: result |= productSign; - return @bitCast(T, result); + return @as(T, @bitCast(result)); } /// Returns `true` if the right shift is inexact (i.e. 
any bit shifted out is non-zero) @@ -168,12 +168,12 @@ fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool { const S = math.Log2Int(Z); var inexact = false; if (count < typeWidth) { - inexact = (lo.* << @intCast(S, typeWidth -% count)) != 0; - lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count)); - hi.* = hi.* >> @intCast(S, count); + inexact = (lo.* << @as(S, @intCast(typeWidth -% count))) != 0; + lo.* = (hi.* << @as(S, @intCast(typeWidth -% count))) | (lo.* >> @as(S, @intCast(count))); + hi.* = hi.* >> @as(S, @intCast(count)); } else if (count < 2 * typeWidth) { - inexact = (hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0; - lo.* = hi.* >> @intCast(S, count -% typeWidth); + inexact = (hi.* << @as(S, @intCast(2 * typeWidth -% count)) | lo.*) != 0; + lo.* = hi.* >> @as(S, @intCast(count -% typeWidth)); hi.* = 0; } else { inexact = (hi.* | lo.*) != 0; @@ -188,7 +188,7 @@ fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 { const integerBit = @as(Z, 1) << math.floatFractionalBits(T); const shift = @clz(significand.*) - @clz(integerBit); - significand.* <<= @intCast(math.Log2Int(Z), shift); + significand.* <<= @as(math.Log2Int(Z), @intCast(shift)); return @as(i32, 1) - shift; } diff --git a/lib/compiler_rt/mulf3_test.zig b/lib/compiler_rt/mulf3_test.zig index 203745e632e7..afaf6cb2192d 100644 --- a/lib/compiler_rt/mulf3_test.zig +++ b/lib/compiler_rt/mulf3_test.zig @@ -4,8 +4,8 @@ const std = @import("std"); const math = std.math; -const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64); -const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64); +const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64)); +const inf128 = @as(f128, @bitCast(@as(u128, 0x7fff000000000000) << 64)); const __multf3 = @import("multf3.zig").__multf3; const __mulxf3 = @import("mulxf3.zig").__mulxf3; @@ -16,9 +16,9 @@ const __mulsf3 = @import("mulsf3.zig").__mulsf3; // use 
two 64-bit integers intead of one 128-bit integer // because 128-bit integer constant can't be assigned directly fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool { - const rep = @bitCast(u128, result); - const hi = @intCast(u64, rep >> 64); - const lo = @truncate(u64, rep); + const rep = @as(u128, @bitCast(result)); + const hi = @as(u64, @intCast(rep >> 64)); + const lo = @as(u64, @truncate(rep)); if (hi == expectedHi and lo == expectedLo) { return true; @@ -45,7 +45,7 @@ fn test__multf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void { fn makeNaN128(rand: u64) f128 { const int_result = @as(u128, 0x7fff000000000000 | (rand & 0xffffffffffff)) << 64; - const float_result = @bitCast(f128, int_result); + const float_result = @as(f128, @bitCast(int_result)); return float_result; } test "multf3" { @@ -60,15 +60,15 @@ test "multf3" { // any * any try test__multf3( - @bitCast(f128, @as(u128, 0x40042eab345678439abcdefea5678234)), - @bitCast(f128, @as(u128, 0x3ffeedcb34a235253948765432134675)), + @as(f128, @bitCast(@as(u128, 0x40042eab345678439abcdefea5678234))), + @as(f128, @bitCast(@as(u128, 0x3ffeedcb34a235253948765432134675))), 0x400423e7f9e3c9fc, 0xd906c2c2a85777c4, ); try test__multf3( - @bitCast(f128, @as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50)), - @bitCast(f128, @as(u128, 0x3ff6ed8764648369535adf4be3214568)), + @as(f128, @bitCast(@as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50))), + @as(f128, @bitCast(@as(u128, 0x3ff6ed8764648369535adf4be3214568))), 0x3fc52a163c6223fc, 0xc94c4bf0430768b4, ); @@ -81,8 +81,8 @@ test "multf3" { ); try test__multf3( - @bitCast(f128, @as(u128, 0x3f154356473c82a9fabf2d22ace345df)), - @bitCast(f128, @as(u128, 0x3e38eda98765476743ab21da23d45679)), + @as(f128, @bitCast(@as(u128, 0x3f154356473c82a9fabf2d22ace345df))), + @as(f128, @bitCast(@as(u128, 0x3e38eda98765476743ab21da23d45679))), 0x3d4f37c1a3137cae, 0xfc6807048bc2836a, ); @@ -108,16 +108,16 @@ test "multf3" { try test__multf3(2.0, 
math.floatTrueMin(f128), 0x0000_0000_0000_0000, 0x0000_0000_0000_0002); } -const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1))); +const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1)))); fn test__mulxf3(a: f80, b: f80, expected: u80) !void { const x = __mulxf3(a, b); - const rep = @bitCast(u80, x); + const rep = @as(u80, @bitCast(x)); if (rep == expected) return; - if (math.isNan(@bitCast(f80, expected)) and math.isNan(x)) + if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x)) return; // We don't currently test NaN payload propagation return error.TestFailed; @@ -125,33 +125,33 @@ fn test__mulxf3(a: f80, b: f80, expected: u80) !void { test "mulxf3" { // NaN * any = NaN - try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); - try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80)); + try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); + try test__mulxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80))); // any * NaN = NaN - try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80)); - try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80)); + try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80))); + try test__mulxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80))); // NaN * inf = NaN - try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80)); + try test__mulxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80))); // inf * NaN = NaN - try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80)); + try test__mulxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80))); // inf * inf = inf - try test__mulxf3(math.inf(f80), math.inf(f80), 
@bitCast(u80, math.inf(f80))); + try test__mulxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // inf * -inf = -inf - try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80))); + try test__mulxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(-math.inf(f80)))); // -inf + inf = -inf - try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80))); + try test__mulxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(-math.inf(f80)))); // inf * any = inf - try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80))); + try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80)))); // any * inf = inf - try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80))); + try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80)))); // any * any try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800); diff --git a/lib/compiler_rt/mulo.zig b/lib/compiler_rt/mulo.zig index 13e58a78005c..d40554da1080 100644 --- a/lib/compiler_rt/mulo.zig +++ b/lib/compiler_rt/mulo.zig @@ -45,7 +45,7 @@ inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) //invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1} if (res < min or max < res) overflow.* = 1; - return @truncate(ST, res); + return @as(ST, @truncate(res)); } pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 { diff --git a/lib/compiler_rt/mulodi4_test.zig b/lib/compiler_rt/mulodi4_test.zig index 3944f62ede03..37530b10605f 100644 --- a/lib/compiler_rt/mulodi4_test.zig +++ b/lib/compiler_rt/mulodi4_test.zig @@ -54,34 +54,34 @@ test "mulodi4" { try test__mulodi4(0x7FFFFFFFFFFFFFFF, -2, 2, 1); try test__mulodi4(-2, 0x7FFFFFFFFFFFFFFF, 2, 1); - try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try 
test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); + try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 0, 0, 0); try test__mulodi4(0, 0x7FFFFFFFFFFFFFFF, 0, 0); try test__mulodi4(0x7FFFFFFFFFFFFFFF, 1, 0x7FFFFFFFFFFFFFFF, 0); try test__mulodi4(1, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); + try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0, 0); - try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)), 0); - try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); + try 
test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(-1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0, 0); + try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0); + try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 1); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -1, 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0x7FFFFFFFFFFFFFFF, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0, 0); - try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0); - try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 0); - try 
test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1); - try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -1, 0x7FFFFFFFFFFFFFFF, 0); + try test__mulodi4(-1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0x7FFFFFFFFFFFFFFF, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0, 0); + try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0); + try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); + try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1); } diff --git a/lib/compiler_rt/mulosi4_test.zig b/lib/compiler_rt/mulosi4_test.zig index 523faa490f37..a6e8178129b7 100644 --- a/lib/compiler_rt/mulosi4_test.zig +++ b/lib/compiler_rt/mulosi4_test.zig @@ -37,36 +37,36 @@ test "mulosi4" { try test__mulosi4(1, -0x1234567, -0x1234567, 0); try test__mulosi4(-0x1234567, 1, -0x1234567, 0); - try test__mulosi4(0x7FFFFFFF, -2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(-2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(0x7FFFFFFF, -1, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(-1, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 0); + try 
test__mulosi4(0x7FFFFFFF, -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(-2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(0x7FFFFFFF, -1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(-1, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); try test__mulosi4(0x7FFFFFFF, 0, 0, 0); try test__mulosi4(0, 0x7FFFFFFF, 0, 0); try test__mulosi4(0x7FFFFFFF, 1, 0x7FFFFFFF, 0); try test__mulosi4(1, 0x7FFFFFFF, 0x7FFFFFFF, 0); - try test__mulosi4(0x7FFFFFFF, 2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1); + try test__mulosi4(0x7FFFFFFF, 2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 0, 0, 0); - try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000000)), 0, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)), 0); - try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try 
test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0, 0); + try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); + try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -2, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -1, 0x7FFFFFFF, 0); - try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000001)), 0x7FFFFFFF, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 0, 0, 0); - try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000001)), 0, 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 1, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -1, 
0x7FFFFFFF, 0); + try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0x7FFFFFFF, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0, 0); + try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); + try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1); } diff --git a/lib/compiler_rt/muloti4_test.zig b/lib/compiler_rt/muloti4_test.zig index 6d204ff785b5..0b5413dba3ec 100644 --- a/lib/compiler_rt/muloti4_test.zig +++ b/lib/compiler_rt/muloti4_test.zig @@ -52,38 +52,38 @@ test "muloti4" { try test__muloti4(2097152, -4398046511103, -9223372036852678656, 0); try test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, @as(u128, 0x000000000000000000B504F333F9DE5B)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x00000000000000B504F333F9DE5BE000))), @as(i128, @bitCast(@as(u128, 0x000000000000000000B504F333F9DE5B))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(-2, 
@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 
0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -2, @as(i128, @bitCast(@as(u128, 
0x80000000000000000000000000000000))), 1); + try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); - try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0); 
- try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0, 0); - try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0); - try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); - try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0, 0); + try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0); + try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0); + try 
test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); + try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1); } diff --git a/lib/compiler_rt/negv.zig b/lib/compiler_rt/negv.zig index 5a26dc65e61f..64961255c3d5 100644 --- a/lib/compiler_rt/negv.zig +++ b/lib/compiler_rt/negv.zig @@ -33,7 +33,7 @@ inline fn negvXi(comptime ST: type, a: ST) ST { else => unreachable, }; const N: UT = @bitSizeOf(ST); - const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1))); + const min: ST = @as(ST, @bitCast((@as(UT, 1) << (N - 1)))); if (a == min) @panic("compiler_rt negv: overflow"); return -a; diff --git a/lib/compiler_rt/parity.zig b/lib/compiler_rt/parity.zig index ee6abf162e7e..02050ba6bc88 100644 --- a/lib/compiler_rt/parity.zig +++ b/lib/compiler_rt/parity.zig @@ -27,9 +27,9 @@ pub fn __parityti2(a: i128) callconv(.C) i32 { inline fn parityXi2(comptime T: type, a: T) i32 { var x = switch (@bitSizeOf(T)) { - 32 => @bitCast(u32, a), - 64 => @bitCast(u64, a), - 128 => @bitCast(u128, a), + 32 => @as(u32, @bitCast(a)), + 64 => @as(u64, @bitCast(a)), + 128 => @as(u128, @bitCast(a)), else => unreachable, }; // Bit Twiddling Hacks: Compute parity in parallel @@ -39,7 +39,7 @@ inline fn parityXi2(comptime T: type, a: T) i32 { shift = shift >> 1; } x &= 0xf; - return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1 + return (@as(u16, @intCast(0x6996)) >> @as(u4, @intCast(x))) & 1; // optimization for >>2 and >>1 } test { diff --git a/lib/compiler_rt/paritydi2_test.zig b/lib/compiler_rt/paritydi2_test.zig index 1cf587b1efcc..5ae8e2d2e75f 100644 --- a/lib/compiler_rt/paritydi2_test.zig +++ b/lib/compiler_rt/paritydi2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn paritydi2Naive(a: i64) i32 { - var x = 
@bitCast(u64, a); + var x = @as(u64, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__paritydi2(a: i64) !void { @@ -22,9 +22,9 @@ test "paritydi2" { try test__paritydi2(0); try test__paritydi2(1); try test__paritydi2(2); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd))); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe))); - try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd)))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe)))); + try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/paritysi2_test.zig b/lib/compiler_rt/paritysi2_test.zig index c1bac5eaaec6..3726170b5307 100644 --- a/lib/compiler_rt/paritysi2_test.zig +++ b/lib/compiler_rt/paritysi2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn paritysi2Naive(a: i32) i32 { - var x = @bitCast(u32, a); + var x = @as(u32, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__paritysi2(a: i32) !void { @@ -22,9 +22,9 @@ test "paritysi2" { try test__paritysi2(0); try test__paritysi2(1); try test__paritysi2(2); - try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffd))); - try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffe))); - try test__paritysi2(@bitCast(i32, @as(u32, 0xffffffff))); + try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xfffffffd)))); + try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xfffffffe)))); + try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xffffffff)))); const 
RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/parityti2_test.zig b/lib/compiler_rt/parityti2_test.zig index 8a869fe7182f..6f6c2102b10f 100644 --- a/lib/compiler_rt/parityti2_test.zig +++ b/lib/compiler_rt/parityti2_test.zig @@ -3,13 +3,13 @@ const parity = @import("parity.zig"); const testing = std.testing; fn parityti2Naive(a: i128) i32 { - var x = @bitCast(u128, a); + var x = @as(u128, @bitCast(a)); var has_parity: bool = false; while (x > 0) { has_parity = !has_parity; x = x & (x - 1); } - return @intCast(i32, @intFromBool(has_parity)); + return @as(i32, @intCast(@intFromBool(has_parity))); } fn test__parityti2(a: i128) !void { @@ -22,9 +22,9 @@ test "parityti2" { try test__parityti2(0); try test__parityti2(1); try test__parityti2(2); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))); - try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)))); + try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcount.zig b/lib/compiler_rt/popcount.zig index ddb0b720c7bd..ab61b0d53524 100644 --- a/lib/compiler_rt/popcount.zig +++ b/lib/compiler_rt/popcount.zig @@ -37,7 +37,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 { i128 => u128, else => unreachable, }; - var x = @bitCast(UT, a); + var x = @as(UT, @bitCast(a)); x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles + (x & (~@as(UT, 0) / 5)); @@ -46,7 +46,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 { // 8 most 
significant bits of x + (x<<8) + (x<<16) + .. x *%= ~@as(UT, 0) / 255; // 0x01...01 x >>= (@bitSizeOf(ST) - 8); - return @intCast(i32, x); + return @as(i32, @intCast(x)); } test { diff --git a/lib/compiler_rt/popcountdi2_test.zig b/lib/compiler_rt/popcountdi2_test.zig index e02628e636ce..daf2c1f18332 100644 --- a/lib/compiler_rt/popcountdi2_test.zig +++ b/lib/compiler_rt/popcountdi2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountdi2Naive(a: i64) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i64, @bitCast(u64, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i64, @bitCast(@as(u64, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountdi2" { try test__popcountdi2(0); try test__popcountdi2(1); try test__popcountdi2(2); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd))); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe))); - try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd)))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe)))); + try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcountsi2_test.zig b/lib/compiler_rt/popcountsi2_test.zig index 7606b1a97e79..497b62516fb9 100644 --- a/lib/compiler_rt/popcountsi2_test.zig +++ b/lib/compiler_rt/popcountsi2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountsi2Naive(a: i32) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i32, @bitCast(u32, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i32, @bitCast(@as(u32, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountsi2" { try test__popcountsi2(0); try test__popcountsi2(1); try test__popcountsi2(2); - try 
test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffd))); - try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffe))); - try test__popcountsi2(@bitCast(i32, @as(u32, 0xffffffff))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffd)))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffe)))); + try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/popcountti2_test.zig b/lib/compiler_rt/popcountti2_test.zig index fae2beccd4f4..b873bcd449f1 100644 --- a/lib/compiler_rt/popcountti2_test.zig +++ b/lib/compiler_rt/popcountti2_test.zig @@ -5,8 +5,8 @@ const testing = std.testing; fn popcountti2Naive(a: i128) i32 { var x = a; var r: i32 = 0; - while (x != 0) : (x = @bitCast(i128, @bitCast(u128, x) >> 1)) { - r += @intCast(i32, x & 1); + while (x != 0) : (x = @as(i128, @bitCast(@as(u128, @bitCast(x)) >> 1))) { + r += @as(i32, @intCast(x & 1)); } return r; } @@ -21,9 +21,9 @@ test "popcountti2" { try test__popcountti2(0); try test__popcountti2(1); try test__popcountti2(2); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))); - try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))); + try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)))); + try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)))); + try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)))); const RndGen = std.rand.DefaultPrng; var rnd = RndGen.init(42); diff --git a/lib/compiler_rt/powiXf2.zig b/lib/compiler_rt/powiXf2.zig index b0cec3235d34..97dc1b77d108 100644 --- a/lib/compiler_rt/powiXf2.zig +++ b/lib/compiler_rt/powiXf2.zig @@ -25,7 +25,7 @@ inline fn powiXf2(comptime FT: type, a: FT, b: i32) FT { const is_recip: 
bool = b < 0; var r: FT = 1.0; while (true) { - if (@bitCast(u32, x_b) & @as(u32, 1) != 0) { + if (@as(u32, @bitCast(x_b)) & @as(u32, 1) != 0) { r *= x_a; } x_b = @divTrunc(x_b, @as(i32, 2)); diff --git a/lib/compiler_rt/powiXf2_test.zig b/lib/compiler_rt/powiXf2_test.zig index 5f7828c3e32f..7014d2a22714 100644 --- a/lib/compiler_rt/powiXf2_test.zig +++ b/lib/compiler_rt/powiXf2_test.zig @@ -49,76 +49,76 @@ test "powihf2" { try test__powihf2(0, 2, 0); try test__powihf2(0, 3, 0); try test__powihf2(0, 4, 0); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powihf2(-0.0, 1, -0.0); try test__powihf2(-0.0, 2, 0); try test__powihf2(-0.0, 3, -0.0); try test__powihf2(-0.0, 4, 0); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powihf2(1, 1, 1); try test__powihf2(1, 2, 1); try test__powihf2(1, 3, 1); try test__powihf2(1, 4, 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powihf2(inf_f16, 1, inf_f16); try test__powihf2(inf_f16, 2, inf_f16); try test__powihf2(inf_f16, 3, inf_f16); try test__powihf2(inf_f16, 4, inf_f16); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f16); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16); + try test__powihf2(inf_f16, 
@as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f16); try test__powihf2(-inf_f16, 1, -inf_f16); try test__powihf2(-inf_f16, 2, inf_f16); try test__powihf2(-inf_f16, 3, -inf_f16); try test__powihf2(-inf_f16, 4, inf_f16); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f16); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f16); // try test__powihf2(0, -1, inf_f16); try test__powihf2(0, -2, inf_f16); try test__powihf2(0, -3, inf_f16); try test__powihf2(0, -4, inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // 0 ^ anything = +inf - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // 0 ^ anything = +inf + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f16); + try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16); try test__powihf2(-0.0, -1, -inf_f16); try test__powihf2(-0.0, -2, inf_f16); try test__powihf2(-0.0, -3, -inf_f16); try test__powihf2(-0.0, -4, inf_f16); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // -0 ^ anything even = +inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f16); // -0 ^ anything odd = -inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // -0 ^ anything even = +inf + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f16); // -0 ^ anything odd = -inf + try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16); try test__powihf2(1, -1, 1); try test__powihf2(1, -2, 1); try test__powihf2(1, -3, 1); try test__powihf2(1, -4, 
1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); // 1.0 ^ anything = 1 - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); // 1.0 ^ anything = 1 + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powihf2(inf_f16, -1, 0); try test__powihf2(inf_f16, -2, 0); try test__powihf2(inf_f16, -3, 0); try test__powihf2(inf_f16, -4, 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); // try test__powihf2(-inf_f16, -1, -0.0); try test__powihf2(-inf_f16, -2, 0); try test__powihf2(-inf_f16, -3, -0.0); try test__powihf2(-inf_f16, -4, 0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powihf2(2, 10, 1024.0); try test__powihf2(-2, 10, 1024.0); @@ -158,76 +158,76 @@ test "powisf2" { try test__powisf2(0, 2, 0); try test__powisf2(0, 3, 0); try test__powisf2(0, 4, 0); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 
0x7FFFFFFE))), 0); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powisf2(-0.0, 1, -0.0); try test__powisf2(-0.0, 2, 0); try test__powisf2(-0.0, 3, -0.0); try test__powisf2(-0.0, 4, 0); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powisf2(1, 1, 1); try test__powisf2(1, 2, 1); try test__powisf2(1, 3, 1); try test__powisf2(1, 4, 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powisf2(inf_f32, 1, inf_f32); try test__powisf2(inf_f32, 2, inf_f32); try test__powisf2(inf_f32, 3, inf_f32); try test__powisf2(inf_f32, 4, inf_f32); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f32); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f32); try test__powisf2(-inf_f32, 1, -inf_f32); try test__powisf2(-inf_f32, 2, inf_f32); try test__powisf2(-inf_f32, 3, -inf_f32); try test__powisf2(-inf_f32, 4, inf_f32); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f32); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f32); try test__powisf2(0, -1, inf_f32); try test__powisf2(0, -2, inf_f32); try test__powisf2(0, -3, inf_f32); try test__powisf2(0, -4, inf_f32); - try 
test__powisf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f32); + try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32); try test__powisf2(-0.0, -1, -inf_f32); try test__powisf2(-0.0, -2, inf_f32); try test__powisf2(-0.0, -3, -inf_f32); try test__powisf2(-0.0, -4, inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f32); + try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32); try test__powisf2(1, -1, 1); try test__powisf2(1, -2, 1); try test__powisf2(1, -3, 1); try test__powisf2(1, -4, 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powisf2(inf_f32, -1, 0); try test__powisf2(inf_f32, -2, 0); try test__powisf2(inf_f32, -3, 0); try test__powisf2(inf_f32, -4, 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try 
test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powisf2(-inf_f32, -1, -0.0); try test__powisf2(-inf_f32, -2, 0); try test__powisf2(-inf_f32, -3, -0.0); try test__powisf2(-inf_f32, -4, 0); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powisf2(2.0, 10, 1024.0); try test__powisf2(-2, 10, 1024.0); @@ -263,76 +263,76 @@ test "powidf2" { try test__powidf2(0, 2, 0); try test__powidf2(0, 3, 0); try test__powidf2(0, 4, 0); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powidf2(-0.0, 1, -0.0); try test__powidf2(-0.0, 2, 0); try test__powidf2(-0.0, 3, -0.0); try test__powidf2(-0.0, 4, 0); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powidf2(1, 1, 1); try test__powidf2(1, 2, 1); try test__powidf2(1, 3, 1); try test__powidf2(1, 4, 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try 
test__powidf2(inf_f64, 1, inf_f64); try test__powidf2(inf_f64, 2, inf_f64); try test__powidf2(inf_f64, 3, inf_f64); try test__powidf2(inf_f64, 4, inf_f64); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f64); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f64); try test__powidf2(-inf_f64, 1, -inf_f64); try test__powidf2(-inf_f64, 2, inf_f64); try test__powidf2(-inf_f64, 3, -inf_f64); try test__powidf2(-inf_f64, 4, inf_f64); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f64); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f64); try test__powidf2(0, -1, inf_f64); try test__powidf2(0, -2, inf_f64); try test__powidf2(0, -3, inf_f64); try test__powidf2(0, -4, inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f64); + try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64); try test__powidf2(-0.0, -1, -inf_f64); try test__powidf2(-0.0, -2, inf_f64); try test__powidf2(-0.0, -3, -inf_f64); try test__powidf2(-0.0, -4, inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64); + try 
test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f64); + try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64); try test__powidf2(1, -1, 1); try test__powidf2(1, -2, 1); try test__powidf2(1, -3, 1); try test__powidf2(1, -4, 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powidf2(inf_f64, -1, 0); try test__powidf2(inf_f64, -2, 0); try test__powidf2(inf_f64, -3, 0); try test__powidf2(inf_f64, -4, 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powidf2(-inf_f64, -1, -0.0); try test__powidf2(-inf_f64, -2, 0); try test__powidf2(-inf_f64, -3, -0.0); try test__powidf2(-inf_f64, -4, 0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powidf2(2, 10, 1024.0); try test__powidf2(-2, 10, 1024.0); @@ -368,76 +368,76 @@ test "powitf2" { try test__powitf2(0, 2, 0); try test__powitf2(0, 3, 
0); try test__powitf2(0, 4, 0); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); try test__powitf2(0, 0x7FFFFFFF, 0); try test__powitf2(-0.0, 1, -0.0); try test__powitf2(-0.0, 2, 0); try test__powitf2(-0.0, 3, -0.0); try test__powitf2(-0.0, 4, 0); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powitf2(1, 1, 1); try test__powitf2(1, 2, 1); try test__powitf2(1, 3, 1); try test__powitf2(1, 4, 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powitf2(inf_f128, 1, inf_f128); try test__powitf2(inf_f128, 2, inf_f128); try test__powitf2(inf_f128, 3, inf_f128); try test__powitf2(inf_f128, 4, inf_f128); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f128); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f128); try test__powitf2(-inf_f128, 1, -inf_f128); try test__powitf2(-inf_f128, 2, inf_f128); try test__powitf2(-inf_f128, 3, -inf_f128); try test__powitf2(-inf_f128, 4, inf_f128); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f128); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f128); try 
test__powitf2(0, -1, inf_f128); try test__powitf2(0, -2, inf_f128); try test__powitf2(0, -3, inf_f128); try test__powitf2(0, -4, inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f128); + try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128); try test__powitf2(-0.0, -1, -inf_f128); try test__powitf2(-0.0, -2, inf_f128); try test__powitf2(-0.0, -3, -inf_f128); try test__powitf2(-0.0, -4, inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f128); + try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128); try test__powitf2(1, -1, 1); try test__powitf2(1, -2, 1); try test__powitf2(1, -3, 1); try test__powitf2(1, -4, 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powitf2(inf_f128, -1, 0); try test__powitf2(inf_f128, -2, 0); try test__powitf2(inf_f128, -3, 0); try test__powitf2(inf_f128, -4, 0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000001)), 
0); - try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powitf2(-inf_f128, -1, -0.0); try test__powitf2(-inf_f128, -2, 0); try test__powitf2(-inf_f128, -3, -0.0); try test__powitf2(-inf_f128, -4, 0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powitf2(2, 10, 1024.0); try test__powitf2(-2, 10, 1024.0); @@ -473,76 +473,76 @@ test "powixf2" { try test__powixf2(0, 2, 0); try test__powixf2(0, 3, 0); try test__powixf2(0, 4, 0); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0); try test__powixf2(-0.0, 1, -0.0); try test__powixf2(-0.0, 2, 0); try test__powixf2(-0.0, 3, -0.0); try test__powixf2(-0.0, 4, 0); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0); try test__powixf2(1, 1, 1); try test__powixf2(1, 2, 1); try test__powixf2(1, 3, 1); try test__powixf2(1, 4, 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); - try test__powixf2(1, @bitCast(i32, 
@as(u32, 0x7FFFFFFF)), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1); try test__powixf2(inf_f80, 1, inf_f80); try test__powixf2(inf_f80, 2, inf_f80); try test__powixf2(inf_f80, 3, inf_f80); try test__powixf2(inf_f80, 4, inf_f80); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f80); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f80); try test__powixf2(-inf_f80, 1, -inf_f80); try test__powixf2(-inf_f80, 2, inf_f80); try test__powixf2(-inf_f80, 3, -inf_f80); try test__powixf2(-inf_f80, 4, inf_f80); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f80); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f80); try test__powixf2(0, -1, inf_f80); try test__powixf2(0, -2, inf_f80); try test__powixf2(0, -3, inf_f80); try test__powixf2(0, -4, inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f80); + try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80); try test__powixf2(-0.0, -1, -inf_f80); try test__powixf2(-0.0, -2, inf_f80); try test__powixf2(-0.0, -3, -inf_f80); try test__powixf2(-0.0, -4, inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), 
-inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f80); + try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80); try test__powixf2(1, -1, 1); try test__powixf2(1, -2, 1); try test__powixf2(1, -3, 1); try test__powixf2(1, -4, 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); - try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1); + try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1); try test__powixf2(inf_f80, -1, 0); try test__powixf2(inf_f80, -2, 0); try test__powixf2(inf_f80, -3, 0); try test__powixf2(inf_f80, -4, 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), 0); + try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000000))), 0); try test__powixf2(-inf_f80, -1, -0.0); try test__powixf2(-inf_f80, -2, 0); try test__powixf2(-inf_f80, -3, -0.0); try test__powixf2(-inf_f80, -4, 0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0); + try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 
0x80000000))), 0); try test__powixf2(2, 10, 1024.0); try test__powixf2(-2, 10, 1024.0); diff --git a/lib/compiler_rt/rem_pio2.zig b/lib/compiler_rt/rem_pio2.zig index 315a99c308c1..14a8733e6639 100644 --- a/lib/compiler_rt/rem_pio2.zig +++ b/lib/compiler_rt/rem_pio2.zig @@ -26,7 +26,7 @@ const pio2_3 = 2.02226624871116645580e-21; // 0x3BA3198A, 0x2E000000 const pio2_3t = 8.47842766036889956997e-32; // 0x397B839A, 0x252049C1 fn U(x: anytype) usize { - return @intCast(usize, x); + return @as(usize, @intCast(x)); } fn medium(ix: u32, x: f64, y: *[2]f64) i32 { @@ -41,7 +41,7 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 { // rint(x/(pi/2)) @"fn" = x * invpio2 + toint - toint; - n = @intFromFloat(i32, @"fn"); + n = @as(i32, @intFromFloat(@"fn")); r = x - @"fn" * pio2_1; w = @"fn" * pio2_1t; // 1st round, good to 85 bits // Matters with directed rounding. @@ -57,17 +57,17 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 { w = @"fn" * pio2_1t; } y[0] = r - w; - ui = @bitCast(u64, y[0]); - ey = @intCast(i32, (ui >> 52) & 0x7ff); - ex = @intCast(i32, ix >> 20); + ui = @as(u64, @bitCast(y[0])); + ey = @as(i32, @intCast((ui >> 52) & 0x7ff)); + ex = @as(i32, @intCast(ix >> 20)); if (ex - ey > 16) { // 2nd round, good to 118 bits t = r; w = @"fn" * pio2_2; r = t - w; w = @"fn" * pio2_2t - ((t - r) - w); y[0] = r - w; - ui = @bitCast(u64, y[0]); - ey = @intCast(i32, (ui >> 52) & 0x7ff); + ui = @as(u64, @bitCast(y[0])); + ey = @as(i32, @intCast((ui >> 52) & 0x7ff)); if (ex - ey > 49) { // 3rd round, good to 151 bits, covers all cases t = r; w = @"fn" * pio2_3; @@ -95,9 +95,9 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { var i: i32 = undefined; var ui: u64 = undefined; - ui = @bitCast(u64, x); + ui = @as(u64, @bitCast(x)); sign = ui >> 63 != 0; - ix = @truncate(u32, (ui >> 32) & 0x7fffffff); + ix = @as(u32, @truncate((ui >> 32) & 0x7fffffff)); if (ix <= 0x400f6a7a) { // |x| ~<= 5pi/4 if ((ix & 0xfffff) == 0x921fb) { // |x| ~= pi/2 or 2pi/2 return medium(ix, x, y); @@ -171,14 +171,14 
@@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { return 0; } // set z = scalbn(|x|,-ilogb(x)+23) - ui = @bitCast(u64, x); + ui = @as(u64, @bitCast(x)); ui &= std.math.maxInt(u64) >> 12; ui |= @as(u64, 0x3ff + 23) << 52; - z = @bitCast(f64, ui); + z = @as(f64, @bitCast(ui)); i = 0; while (i < 2) : (i += 1) { - tx[U(i)] = @floatFromInt(f64, @intFromFloat(i32, z)); + tx[U(i)] = @as(f64, @floatFromInt(@as(i32, @intFromFloat(z)))); z = (z - tx[U(i)]) * 0x1p24; } tx[U(i)] = z; @@ -186,7 +186,7 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 { while (tx[U(i)] == 0.0) { i -= 1; } - n = rem_pio2_large(tx[0..], ty[0..], @intCast(i32, (ix >> 20)) - (0x3ff + 23), i + 1, 1); + n = rem_pio2_large(tx[0..], ty[0..], @as(i32, @intCast((ix >> 20))) - (0x3ff + 23), i + 1, 1); if (sign) { y[0] = -ty[0]; y[1] = -ty[1]; diff --git a/lib/compiler_rt/rem_pio2_large.zig b/lib/compiler_rt/rem_pio2_large.zig index afded1838736..79262f0e5efa 100644 --- a/lib/compiler_rt/rem_pio2_large.zig +++ b/lib/compiler_rt/rem_pio2_large.zig @@ -150,7 +150,7 @@ const PIo2 = [_]f64{ }; fn U(x: anytype) usize { - return @intCast(usize, x); + return @as(usize, @intCast(x)); } /// Returns the last three digits of N with y = x - N*pi/2 so that |y| < pi/2. 
@@ -295,7 +295,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i += 1; j += 1; }) { - f[U(i)] = if (j < 0) 0.0 else @floatFromInt(f64, ipio2[U(j)]); + f[U(i)] = if (j < 0) 0.0 else @as(f64, @floatFromInt(ipio2[U(j)])); } // compute q[0],q[1],...q[jk] @@ -322,22 +322,22 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i += 1; j -= 1; }) { - fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z)); - iq[U(i)] = @intFromFloat(i32, z - 0x1p24 * fw); + fw = @as(f64, @floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z)))); + iq[U(i)] = @as(i32, @intFromFloat(z - 0x1p24 * fw)); z = q[U(j - 1)] + fw; } // compute n z = math.scalbn(z, q0); // actual value of z z -= 8.0 * @floor(z * 0.125); // trim off integer >= 8 - n = @intFromFloat(i32, z); - z -= @floatFromInt(f64, n); + n = @as(i32, @intFromFloat(z)); + z -= @as(f64, @floatFromInt(n)); ih = 0; if (q0 > 0) { // need iq[jz-1] to determine n - i = iq[U(jz - 1)] >> @intCast(u5, 24 - q0); + i = iq[U(jz - 1)] >> @as(u5, @intCast(24 - q0)); n += i; - iq[U(jz - 1)] -= i << @intCast(u5, 24 - q0); - ih = iq[U(jz - 1)] >> @intCast(u5, 23 - q0); + iq[U(jz - 1)] -= i << @as(u5, @intCast(24 - q0)); + ih = iq[U(jz - 1)] >> @as(u5, @intCast(23 - q0)); } else if (q0 == 0) { ih = iq[U(jz - 1)] >> 23; } else if (z >= 0.5) { @@ -390,7 +390,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { i = jz + 1; while (i <= jz + k) : (i += 1) { // add q[jz+1] to q[jz+k] - f[U(jx + i)] = @floatFromInt(f64, ipio2[U(jv + i)]); + f[U(jx + i)] = @as(f64, @floatFromInt(ipio2[U(jv + i)])); j = 0; fw = 0; while (j <= jx) : (j += 1) { @@ -414,13 +414,13 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { } else { // break z into 24-bit if necessary z = math.scalbn(z, -q0); if (z >= 0x1p24) { - fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z)); - iq[U(jz)] = @intFromFloat(i32, z - 0x1p24 * fw); + fw = @as(f64, 
@floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z)))); + iq[U(jz)] = @as(i32, @intFromFloat(z - 0x1p24 * fw)); jz += 1; q0 += 24; - iq[U(jz)] = @intFromFloat(i32, fw); + iq[U(jz)] = @as(i32, @intFromFloat(fw)); } else { - iq[U(jz)] = @intFromFloat(i32, z); + iq[U(jz)] = @as(i32, @intFromFloat(z)); } } @@ -428,7 +428,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 { fw = math.scalbn(@as(f64, 1.0), q0); i = jz; while (i >= 0) : (i -= 1) { - q[U(i)] = fw * @floatFromInt(f64, iq[U(i)]); + q[U(i)] = fw * @as(f64, @floatFromInt(iq[U(i)])); fw *= 0x1p-24; } diff --git a/lib/compiler_rt/rem_pio2f.zig b/lib/compiler_rt/rem_pio2f.zig index 9e47bbcb24cc..2be81313f5ce 100644 --- a/lib/compiler_rt/rem_pio2f.zig +++ b/lib/compiler_rt/rem_pio2f.zig @@ -30,14 +30,14 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 { var e0: u32 = undefined; var ui: u32 = undefined; - ui = @bitCast(u32, x); + ui = @as(u32, @bitCast(x)); ix = ui & 0x7fffffff; // 25+53 bit pi is good enough for medium size if (ix < 0x4dc90fdb) { // |x| ~< 2^28*(pi/2), medium size // Use a specialized rint() to get fn. - @"fn" = @floatCast(f64, x) * invpio2 + toint - toint; - n = @intFromFloat(i32, @"fn"); + @"fn" = @as(f64, @floatCast(x)) * invpio2 + toint - toint; + n = @as(i32, @intFromFloat(@"fn")); y.* = x - @"fn" * pio2_1 - @"fn" * pio2_1t; // Matters with directed rounding. 
if (y.* < -pio4) { @@ -59,8 +59,8 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 { sign = ui >> 31 != 0; e0 = (ix >> 23) - (0x7f + 23); // e0 = ilogb(|x|)-23, positive ui = ix - (e0 << 23); - tx[0] = @bitCast(f32, ui); - n = rem_pio2_large(&tx, &ty, @intCast(i32, e0), 1, 0); + tx[0] = @as(f32, @bitCast(ui)); + n = rem_pio2_large(&tx, &ty, @as(i32, @intCast(e0)), 1, 0); if (sign) { y.* = -ty[0]; return -n; diff --git a/lib/compiler_rt/round.zig b/lib/compiler_rt/round.zig index 121371fa1721..2c7cb8956a2e 100644 --- a/lib/compiler_rt/round.zig +++ b/lib/compiler_rt/round.zig @@ -27,14 +27,14 @@ comptime { pub fn __roundh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, roundf(x)); + return @as(f16, @floatCast(roundf(x))); } pub fn roundf(x_: f32) callconv(.C) f32 { const f32_toint = 1.0 / math.floatEps(f32); var x = x_; - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const e = (u >> 23) & 0xFF; var y: f32 = undefined; @@ -46,7 +46,7 @@ pub fn roundf(x_: f32) callconv(.C) f32 { } if (e < 0x7F - 1) { math.doNotOptimizeAway(x + f32_toint); - return 0 * @bitCast(f32, u); + return 0 * @as(f32, @bitCast(u)); } y = x + f32_toint - f32_toint - x; @@ -69,7 +69,7 @@ pub fn round(x_: f64) callconv(.C) f64 { const f64_toint = 1.0 / math.floatEps(f64); var x = x_; - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; var y: f64 = undefined; @@ -81,7 +81,7 @@ pub fn round(x_: f64) callconv(.C) f64 { } if (e < 0x3ff - 1) { math.doNotOptimizeAway(x + f64_toint); - return 0 * @bitCast(f64, u); + return 0 * @as(f64, @bitCast(u)); } y = x + f64_toint - f64_toint - x; @@ -102,14 +102,14 @@ pub fn round(x_: f64) callconv(.C) f64 { pub fn __roundx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, roundq(x)); + return @as(f80, @floatCast(roundq(x))); } pub fn roundq(x_: f128) callconv(.C) f128 { const f128_toint = 1.0 / math.floatEps(f128); var x = x_; - 
const u = @bitCast(u128, x); + const u = @as(u128, @bitCast(x)); const e = (u >> 112) & 0x7FFF; var y: f128 = undefined; @@ -121,7 +121,7 @@ pub fn roundq(x_: f128) callconv(.C) f128 { } if (e < 0x3FFF - 1) { math.doNotOptimizeAway(x + f128_toint); - return 0 * @bitCast(f128, u); + return 0 * @as(f128, @bitCast(u)); } y = x + f128_toint - f128_toint - x; diff --git a/lib/compiler_rt/shift.zig b/lib/compiler_rt/shift.zig index 4d8658dbc952..0ca5637d9d5b 100644 --- a/lib/compiler_rt/shift.zig +++ b/lib/compiler_rt/shift.zig @@ -37,13 +37,13 @@ inline fn ashlXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.low = 0; - output.s.high = input.s.low << @intCast(S, b - word_t.bits); + output.s.high = input.s.low << @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.low = input.s.low << @intCast(S, b); - output.s.high = input.s.high << @intCast(S, b); - output.s.high |= input.s.low >> @intCast(S, word_t.bits - b); + output.s.low = input.s.low << @as(S, @intCast(b)); + output.s.high = input.s.high << @as(S, @intCast(b)); + output.s.high |= input.s.low >> @as(S, @intCast(word_t.bits - b)); } return output.all; @@ -60,16 +60,16 @@ inline fn ashrXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.high = input.s.high >> (word_t.bits - 1); - output.s.low = input.s.high >> @intCast(S, b - word_t.bits); + output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.high = input.s.high >> @intCast(S, b); - output.s.low = input.s.high << @intCast(S, word_t.bits - b); + output.s.high = input.s.high >> @as(S, @intCast(b)); + output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b)); // Avoid sign-extension here - output.s.low |= @bitCast( + output.s.low |= @as( word_t.HalfT, - @bitCast(word_t.HalfTU, input.s.low) >> @intCast(S, b), + @bitCast(@as(word_t.HalfTU, @bitCast(input.s.low)) >> @as(S, @intCast(b))), ); } @@ -87,13 +87,13 @@ inline fn 
lshrXi3(comptime T: type, a: T, b: i32) T { if (b >= word_t.bits) { output.s.high = 0; - output.s.low = input.s.high >> @intCast(S, b - word_t.bits); + output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits)); } else if (b == 0) { return a; } else { - output.s.high = input.s.high >> @intCast(S, b); - output.s.low = input.s.high << @intCast(S, word_t.bits - b); - output.s.low |= input.s.low >> @intCast(S, b); + output.s.high = input.s.high >> @as(S, @intCast(b)); + output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b)); + output.s.low |= input.s.low >> @as(S, @intCast(b)); } return output.all; diff --git a/lib/compiler_rt/shift_test.zig b/lib/compiler_rt/shift_test.zig index b9c5dc64fab0..03388bfa1e31 100644 --- a/lib/compiler_rt/shift_test.zig +++ b/lib/compiler_rt/shift_test.zig @@ -18,346 +18,346 @@ const __lshrti3 = shift.__lshrti3; fn test__ashlsi3(a: i32, b: i32, expected: u32) !void { const x = __ashlsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__ashldi3(a: i64, b: i32, expected: u64) !void { const x = __ashldi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__ashlti3(a: i128, b: i32, expected: u128) !void { const x = __ashlti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, @as(u128, @bitCast(x))); } test "ashlsi3" { - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 0, 0x12ABCDEF); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 1, 0x25579BDE); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 2, 0x4AAF37BC); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 3, 0x955E6F78); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 4, 0x2ABCDEF0); - - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 28, 0xF0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 29, 
0xE0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 30, 0xC0000000); - try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 31, 0x80000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 0, 0x12ABCDEF); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 1, 0x25579BDE); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 2, 0x4AAF37BC); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 3, 0x955E6F78); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 4, 0x2ABCDEF0); + + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 28, 0xF0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 29, 0xE0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 30, 0xC0000000); + try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 31, 0x80000000); } test "ashldi3" { - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x2468ACF13579BDE); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37BC); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x91A2B3C4D5E6F78); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDEF0); - - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x789ABCDEF0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0xF13579BDE0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0xE26AF37BC0000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0xC4D5E6F780000000); - - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x89ABCDEF00000000); - - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x13579BDE00000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x26AF37BC00000000); - try test__ashldi3(@bitCast(i64, 
@as(u64, 0x0123456789ABCDEF)), 35, 0x4D5E6F7800000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x9ABCDEF000000000); - - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0xF000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0xE000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0xC000000000000000); - try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0x8000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x2468ACF13579BDE); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37BC); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x91A2B3C4D5E6F78); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDEF0); + + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x789ABCDEF0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0xF13579BDE0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0xE26AF37BC0000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0xC4D5E6F780000000); + + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x89ABCDEF00000000); + + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x13579BDE00000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x26AF37BC00000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x4D5E6F7800000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x9ABCDEF000000000); + + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0xF000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 
0x0123456789ABCDEF))), 61, 0xE000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0xC000000000000000); + try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0x8000000000000000); } test "ashlti3" { - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFDB97530ECA8642BFDB97530ECA8642A); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFB72EA61D950C857FB72EA61D950C854); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xEDCBA9876543215FEDCBA98765432150); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0x876543215FEDCBA98765432150000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0x0ECA8642BFDB97530ECA8642A0000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0x1D950C857FB72EA61D950C8540000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0x76543215FEDCBA987654321500000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xECA8642BFDB97530ECA8642A00000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xD950C857FB72EA61D950C85400000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xB2A190AFF6E5D4C3B2A190A800000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0x6543215FEDCBA9876543215000000000); - try 
test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0x5FEDCBA9876543215000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xBFDB97530ECA8642A000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0x7FB72EA61D950C854000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFF6E5D4C3B2A190A8000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFEDCBA98765432150000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFDB97530ECA8642A0000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFB72EA61D950C8540000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xF6E5D4C3B2A190A80000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xEDCBA987654321500000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0x87654321500000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 0x0ECA8642A00000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0x1D950C85400000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0x3B2A190A800000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0x76543215000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xECA8642A000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xD950C854000000000000000000000000); - try 
test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xB2A190A8000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0x65432150000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 124, 0x50000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xA0000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0x40000000000000000000000000000000); - try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0x80000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFDB97530ECA8642BFDB97530ECA8642A); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFB72EA61D950C857FB72EA61D950C854); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xEDCBA9876543215FEDCBA98765432150); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0x876543215FEDCBA98765432150000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0x0ECA8642BFDB97530ECA8642A0000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0x1D950C857FB72EA61D950C8540000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 
0x76543215FEDCBA987654321500000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xECA8642BFDB97530ECA8642A00000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xD950C857FB72EA61D950C85400000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xB2A190AFF6E5D4C3B2A190A800000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0x6543215FEDCBA9876543215000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0x5FEDCBA9876543215000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xBFDB97530ECA8642A000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0x7FB72EA61D950C854000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFF6E5D4C3B2A190A8000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFEDCBA98765432150000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 0xFDB97530ECA8642A0000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFB72EA61D950C8540000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xF6E5D4C3B2A190A80000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xEDCBA987654321500000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0x87654321500000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0x0ECA8642A00000000000000000000000); + try 
test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0x1D950C85400000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 95, 0x3B2A190A800000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0x76543215000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xECA8642A000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xD950C854000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xB2A190A8000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0x65432150000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0x50000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xA0000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0x40000000000000000000000000000000); + try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0x80000000000000000000000000000000); } fn test__ashrsi3(a: i32, b: i32, expected: u32) !void { const x = __ashrsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__ashrdi3(a: i64, b: i32, expected: u64) !void { const x = __ashrdi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__ashrti3(a: i128, b: i32, expected: u128) !void { const x = __ashrti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, 
@as(u128, @bitCast(x))); } test "ashrsi3" { - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0xFF6DE54C); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0xFFB6F2A6); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0xFFDB7953); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFFEDBCA9); - - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xFFFFFFFF); - try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0xFFFFFFFF); - - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0xC677C677); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0xE33BE33B); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0xF19DF19D); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0xF8CEF8CE); - - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0xFFFFFFF8); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0xFFFFFFFC); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0xFFFFFFFE); - try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 31, 0xFFFFFFFF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0xFF6DE54C); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 2, 0xFFB6F2A6); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0xFFDB7953); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFFEDBCA9); + + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xFFFFFFFF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0xFFFFFFFF); + + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0xC677C677); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 
0xE33BE33B); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0xF19DF19D); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0xF8CEF8CE); + + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0xFFFFFFF8); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0xFFFFFFFC); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0xFFFFFFFE); + try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0xFFFFFFFF); } test "ashrdi3" { - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0x91A2B3C); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0); - try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0); - - try 
test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0xFF6E5D4C3B2A1908); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0xFFB72EA61D950C84); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0xFFDB97530ECA8642); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFFEDCBA987654321); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFFFFFFFFEDCBA987); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0xFFFFFFFFF6E5D4C3); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0xFFFFFFFFFB72EA61); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0xFFFFFFFFFDB97530); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFFFFFFFFFEDCBA98); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0xFFFFFFFFFF6E5D4C); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0xFFFFFFFFFFB72EA6); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0xFFFFFFFFFFDB9753); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFFFFFFFFFFEDCBA9); - - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xFFFFFFFFFFFFFFFA); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0xFFFFFFFFFFFFFFFD); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0xFFFFFFFFFFFFFFFE); - try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0xFFFFFFFFFFFFFFFF); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD); + try 
test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0x48D159E); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0xFF6E5D4C3B2A1908); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0xFFB72EA61D950C84); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0xFFDB97530ECA8642); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFFEDCBA987654321); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFFFFFFFFEDCBA987); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0xFFFFFFFFF6E5D4C3); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0xFFFFFFFFFB72EA61); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 
0xFEDCBA9876543210))), 31, 0xFFFFFFFFFDB97530); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFFFFFFFFFEDCBA98); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0xFFFFFFFFFF6E5D4C); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0xFFFFFFFFFFB72EA6); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0xFFFFFFFFFFDB9753); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFFFFFFFFFFEDCBA9); + + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 0xFFFFFFFFFFFFFFFA); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0xFFFFFFFFFFFFFFFD); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0xFFFFFFFFFFFFFFFE); + try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0xFFFFFFFFFFFFFFFF); } test "ashrti3" { - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFFB72EA61D950C857FB72EA61D950C85); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 0xFFDB97530ECA8642BFDB97530ECA8642); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xFFEDCBA9876543215FEDCBA987654321); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 
31, 0xFFFFFFFFFDB97530ECA8642BFDB97530); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 
0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9); - - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFFB72EA61D950C857FB72EA61D950C85); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xFFDB97530ECA8642BFDB97530ECA8642); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 
0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xFFEDCBA9876543215FEDCBA987654321); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0xFFFFFFFFFDB97530ECA8642BFDB97530); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 
0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9); + + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try 
test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); } fn test__lshrsi3(a: i32, b: i32, expected: u32) !void { const x = __lshrsi3(a, b); - try testing.expectEqual(expected, @bitCast(u32, x)); + try testing.expectEqual(expected, @as(u32, @bitCast(x))); } fn test__lshrdi3(a: i64, b: i32, expected: u64) !void { const x = __lshrdi3(a, b); - try testing.expectEqual(expected, @bitCast(u64, x)); + try testing.expectEqual(expected, @as(u64, @bitCast(x))); } fn test__lshrti3(a: i128, b: i32, expected: u128) !void { const x = __lshrti3(a, b); - try testing.expectEqual(expected, @bitCast(u128, x)); + try testing.expectEqual(expected, @as(u128, @bitCast(x))); } test "lshrsi3" { - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0x7F6DE54C); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0x3FB6F2A6); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0x1FDB7953); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFEDBCA9); - - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xF); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 29, 0x7); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 30, 0x3); - try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0x1); - - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0x4677C677); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0x233BE33B); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0x119DF19D); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0x8CEF8CE); - - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0x8); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0x4); - try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0x2); - try test__lshrsi3(@bitCast(i32, @as(u32, 
0x8CEF8CEF)), 31, 0x1); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0x7F6DE54C); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 2, 0x3FB6F2A6); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0x1FDB7953); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFEDBCA9); + + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xF); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 29, 0x7); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 30, 0x3); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0x1); + + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0x4677C677); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 0x233BE33B); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0x119DF19D); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0x8CEF8CE); + + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0x8); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0x4); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0x2); + try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0x1); } test "lshrdi3" { - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 
0x91A2B3C); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0); - try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0x7F6E5D4C3B2A1908); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0x3FB72EA61D950C84); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0x1FDB97530ECA8642); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFEDCBA987654321); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFEDCBA987); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0x7F6E5D4C3); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0x3FB72EA61); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0x1FDB97530); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFEDCBA98); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0x7F6E5D4C); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0x3FB72EA6); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0x1FDB9753); - try 
test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFEDCBA9); - - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xA); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0x5); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0x2); - try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0x1); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0x48D159E); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 
0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0x7F6E5D4C3B2A1908); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0x3FB72EA61D950C84); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0x1FDB97530ECA8642); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFEDCBA987654321); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFEDCBA987); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0x7F6E5D4C3); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0x3FB72EA61); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 31, 0x1FDB97530); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFEDCBA98); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0x7F6E5D4C); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0x3FB72EA6); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0x1FDB9753); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFEDCBA9); + + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 0xA); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0x5); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0x2); + try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0x1); } test "lshrti3" { - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 0, 0xFEDCBA9876543215FEDCBA987654321F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 2, 0x3FB72EA61D950C857FB72EA61D950C87); - try test__lshrti3(@bitCast(i128, @as(u128, 
0xFEDCBA9876543215FEDCBA987654321F)), 3, 0x1FDB97530ECA8642BFDB97530ECA8643); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 4, 0xFEDCBA9876543215FEDCBA987654321); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 28, 0xFEDCBA9876543215FEDCBA987); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 30, 0x3FB72EA61D950C857FB72EA61); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 31, 0x1FDB97530ECA8642BFDB97530); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 32, 0xFEDCBA9876543215FEDCBA98); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 33, 0x7F6E5D4C3B2A190AFF6E5D4C); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 34, 0x3FB72EA61D950C857FB72EA6); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 35, 0x1FDB97530ECA8642BFDB9753); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 36, 0xFEDCBA9876543215FEDCBA9); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 60, 0xFEDCBA9876543215F); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 61, 0x7F6E5D4C3B2A190AF); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 62, 0x3FB72EA61D950C857); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 63, 0x1FDB97530ECA8642B); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 64, 0xFEDCBA9876543215); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 65, 0x7F6E5D4C3B2A190A); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 66, 
0x3FB72EA61D950C85); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 67, 0x1FDB97530ECA8642); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 68, 0xFEDCBA987654321); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 92, 0xFEDCBA987); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 93, 0x7F6E5D4C3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 94, 0x3FB72EA61); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 95, 0x1FDB97530); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 96, 0xFEDCBA98); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 97, 0x7F6E5D4C); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 98, 0x3FB72EA6); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 99, 0x1FDB9753); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 100, 0xFEDCBA9); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 124, 0xF); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 125, 0x7); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 126, 0x3); - try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 127, 0x1); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 0, 0xFEDCBA9876543215FEDCBA987654321F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 2, 0x3FB72EA61D950C857FB72EA61D950C87); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 3, 
0x1FDB97530ECA8642BFDB97530ECA8643); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 4, 0xFEDCBA9876543215FEDCBA987654321); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 28, 0xFEDCBA9876543215FEDCBA987); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 30, 0x3FB72EA61D950C857FB72EA61); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 31, 0x1FDB97530ECA8642BFDB97530); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 32, 0xFEDCBA9876543215FEDCBA98); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 33, 0x7F6E5D4C3B2A190AFF6E5D4C); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 34, 0x3FB72EA61D950C857FB72EA6); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 35, 0x1FDB97530ECA8642BFDB9753); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 36, 0xFEDCBA9876543215FEDCBA9); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 60, 0xFEDCBA9876543215F); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 61, 0x7F6E5D4C3B2A190AF); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 62, 0x3FB72EA61D950C857); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 63, 0x1FDB97530ECA8642B); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 64, 0xFEDCBA9876543215); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 65, 0x7F6E5D4C3B2A190A); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 
0xFEDCBA9876543215FEDCBA987654321F))), 66, 0x3FB72EA61D950C85); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 67, 0x1FDB97530ECA8642); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 68, 0xFEDCBA987654321); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 92, 0xFEDCBA987); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 93, 0x7F6E5D4C3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 94, 0x3FB72EA61); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 95, 0x1FDB97530); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 96, 0xFEDCBA98); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 97, 0x7F6E5D4C); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 98, 0x3FB72EA6); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 99, 0x1FDB9753); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 100, 0xFEDCBA9); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 124, 0xF); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 125, 0x7); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 126, 0x3); + try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 127, 0x1); } diff --git a/lib/compiler_rt/sin.zig b/lib/compiler_rt/sin.zig index eb3d64b0c878..40c8287b87ea 100644 --- a/lib/compiler_rt/sin.zig +++ b/lib/compiler_rt/sin.zig @@ -31,7 +31,7 @@ comptime { pub fn __sinh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, sinf(x)); + return @as(f16, @floatCast(sinf(x))); } pub fn sinf(x: 
f32) callconv(.C) f32 { @@ -41,7 +41,7 @@ pub fn sinf(x: f32) callconv(.C) f32 { const s3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const s4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -90,7 +90,7 @@ pub fn sinf(x: f32) callconv(.C) f32 { } pub fn sin(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -120,12 +120,12 @@ pub fn sin(x: f64) callconv(.C) f64 { pub fn __sinx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, sinq(x)); + return @as(f80, @floatCast(sinq(x))); } pub fn sinq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return sin(@floatCast(f64, x)); + return sin(@as(f64, @floatCast(x))); } pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble { @@ -180,11 +180,11 @@ test "sin64.special" { } test "sin32 #9901" { - const float = @bitCast(f32, @as(u32, 0b11100011111111110000000000000000)); + const float = @as(f32, @bitCast(@as(u32, 0b11100011111111110000000000000000))); _ = sinf(float); } test "sin64 #9901" { - const float = @bitCast(f64, @as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001)); + const float = @as(f64, @bitCast(@as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001))); _ = sin(float); } diff --git a/lib/compiler_rt/sincos.zig b/lib/compiler_rt/sincos.zig index 769c8d83897d..ffe67e0b3329 100644 --- a/lib/compiler_rt/sincos.zig +++ b/lib/compiler_rt/sincos.zig @@ -26,8 +26,8 @@ pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void { var big_sin: f32 = undefined; var big_cos: f32 = undefined; sincosf(x, &big_sin, &big_cos); - r_sin.* = @floatCast(f16, big_sin); - r_cos.* = @floatCast(f16, big_cos); + r_sin.* = @as(f16, @floatCast(big_sin)); + r_cos.* = @as(f16, @floatCast(big_cos)); } pub fn 
sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { @@ -36,7 +36,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { const sc3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const sc4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - const pre_ix = @bitCast(u32, x); + const pre_ix = @as(u32, @bitCast(x)); const sign = pre_ix >> 31 != 0; const ix = pre_ix & 0x7fffffff; @@ -126,7 +126,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void { } pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void { - const ix = @truncate(u32, @bitCast(u64, x) >> 32) & 0x7fffffff; + const ix = @as(u32, @truncate(@as(u64, @bitCast(x)) >> 32)) & 0x7fffffff; // |x| ~< pi/4 if (ix <= 0x3fe921fb) { @@ -182,8 +182,8 @@ pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void { var big_sin: f128 = undefined; var big_cos: f128 = undefined; sincosq(x, &big_sin, &big_cos); - r_sin.* = @floatCast(f80, big_sin); - r_cos.* = @floatCast(f80, big_cos); + r_sin.* = @as(f80, @floatCast(big_sin)); + r_cos.* = @as(f80, @floatCast(big_cos)); } pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void { @@ -191,7 +191,7 @@ pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void { //return sincos_generic(f128, x, r_sin, r_cos); var small_sin: f64 = undefined; var small_cos: f64 = undefined; - sincos(@floatCast(f64, x), &small_sin, &small_cos); + sincos(@as(f64, @floatCast(x)), &small_sin, &small_cos); r_sin.* = small_sin; r_cos.* = small_cos; } @@ -217,8 +217,8 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void { const sc1pio4: F = 1.0 * math.pi / 4.0; const bits = @typeInfo(F).Float.bits; const I = std.meta.Int(.unsigned, bits); - const ix = @bitCast(I, x) & (math.maxInt(I) >> 1); - const se = @truncate(u16, ix >> (bits - 16)); + const ix = @as(I, @bitCast(x)) & (math.maxInt(I) >> 1); + const se = @as(u16, @truncate(ix >> (bits - 16))); if (se == 0x7fff) { 
const result = x - x; @@ -227,7 +227,7 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void { return; } - if (@bitCast(F, ix) < sc1pio4) { + if (@as(F, @bitCast(ix)) < sc1pio4) { if (se < 0x3fff - math.floatFractionalBits(F) - 1) { // raise underflow if subnormal if (se == 0) { diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig index 2ec9c39e0bf1..0dbd67330687 100644 --- a/lib/compiler_rt/sqrt.zig +++ b/lib/compiler_rt/sqrt.zig @@ -20,13 +20,13 @@ comptime { pub fn __sqrth(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, sqrtf(x)); + return @as(f16, @floatCast(sqrtf(x))); } pub fn sqrtf(x: f32) callconv(.C) f32 { const tiny: f32 = 1.0e-30; - const sign: i32 = @bitCast(i32, @as(u32, 0x80000000)); - var ix: i32 = @bitCast(i32, x); + const sign: i32 = @as(i32, @bitCast(@as(u32, 0x80000000))); + var ix: i32 = @as(i32, @bitCast(x)); if ((ix & 0x7F800000) == 0x7F800000) { return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan @@ -96,7 +96,7 @@ pub fn sqrtf(x: f32) callconv(.C) f32 { ix = (q >> 1) + 0x3f000000; ix += m << 23; - return @bitCast(f32, ix); + return @as(f32, @bitCast(ix)); } /// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound @@ -105,10 +105,10 @@ pub fn sqrtf(x: f32) callconv(.C) f32 { pub fn sqrt(x: f64) callconv(.C) f64 { const tiny: f64 = 1.0e-300; const sign: u32 = 0x80000000; - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); - var ix0 = @intCast(u32, u >> 32); - var ix1 = @intCast(u32, u & 0xFFFFFFFF); + var ix0 = @as(u32, @intCast(u >> 32)); + var ix1 = @as(u32, @intCast(u & 0xFFFFFFFF)); // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan if (ix0 & 0x7FF00000 == 0x7FF00000) { @@ -125,7 +125,7 @@ pub fn sqrt(x: f64) callconv(.C) f64 { } // normalize x - var m = @intCast(i32, ix0 >> 20); + var m = @as(i32, @intCast(ix0 >> 20)); if (m == 0) { // subnormal while (ix0 == 0) { @@ -139,9 +139,9 
@@ pub fn sqrt(x: f64) callconv(.C) f64 { while (ix0 & 0x00100000 == 0) : (i += 1) { ix0 <<= 1; } - m -= @intCast(i32, i) - 1; - ix0 |= ix1 >> @intCast(u5, 32 - i); - ix1 <<= @intCast(u5, i); + m -= @as(i32, @intCast(i)) - 1; + ix0 |= ix1 >> @as(u5, @intCast(32 - i)); + ix1 <<= @as(u5, @intCast(i)); } // unbias exponent @@ -225,21 +225,21 @@ pub fn sqrt(x: f64) callconv(.C) f64 { // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same // behaviour at least. - var iix0 = @intCast(i32, ix0); + var iix0 = @as(i32, @intCast(ix0)); iix0 = iix0 +% (m << 20); - const uz = (@intCast(u64, iix0) << 32) | ix1; - return @bitCast(f64, uz); + const uz = (@as(u64, @intCast(iix0)) << 32) | ix1; + return @as(f64, @bitCast(uz)); } pub fn __sqrtx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, sqrtq(x)); + return @as(f80, @floatCast(sqrtq(x))); } pub fn sqrtq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return sqrt(@floatCast(f64, x)); + return sqrt(@as(f64, @floatCast(x))); } pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/subdf3.zig b/lib/compiler_rt/subdf3.zig index a7630b6ea2ef..31e34472987d 100644 --- a/lib/compiler_rt/subdf3.zig +++ b/lib/compiler_rt/subdf3.zig @@ -11,11 +11,11 @@ comptime { } fn __subdf3(a: f64, b: f64) callconv(.C) f64 { - const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63)); + const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63))); return a + neg_b; } fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 { - const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63)); + const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63))); return a + neg_b; } diff --git a/lib/compiler_rt/subhf3.zig b/lib/compiler_rt/subhf3.zig index f1d648102ba0..5f84f3272536 100644 --- a/lib/compiler_rt/subhf3.zig +++ b/lib/compiler_rt/subhf3.zig @@ -7,6 +7,6 @@ comptime { } fn 
__subhf3(a: f16, b: f16) callconv(.C) f16 { - const neg_b = @bitCast(f16, @bitCast(u16, b) ^ (@as(u16, 1) << 15)); + const neg_b = @as(f16, @bitCast(@as(u16, @bitCast(b)) ^ (@as(u16, 1) << 15))); return a + neg_b; } diff --git a/lib/compiler_rt/subsf3.zig b/lib/compiler_rt/subsf3.zig index fbc48ead416f..f94d9802d195 100644 --- a/lib/compiler_rt/subsf3.zig +++ b/lib/compiler_rt/subsf3.zig @@ -11,11 +11,11 @@ comptime { } fn __subsf3(a: f32, b: f32) callconv(.C) f32 { - const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31)); + const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31))); return a + neg_b; } fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 { - const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31)); + const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31))); return a + neg_b; } diff --git a/lib/compiler_rt/subtf3.zig b/lib/compiler_rt/subtf3.zig index 0008905c9446..ee6383a07da9 100644 --- a/lib/compiler_rt/subtf3.zig +++ b/lib/compiler_rt/subtf3.zig @@ -20,6 +20,6 @@ fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.C) void { } inline fn sub(a: f128, b: f128) f128 { - const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127)); + const neg_b = @as(f128, @bitCast(@as(u128, @bitCast(b)) ^ (@as(u128, 1) << 127))); return a + neg_b; } diff --git a/lib/compiler_rt/tan.zig b/lib/compiler_rt/tan.zig index d6ed881afcb2..79bda609159b 100644 --- a/lib/compiler_rt/tan.zig +++ b/lib/compiler_rt/tan.zig @@ -33,7 +33,7 @@ comptime { pub fn __tanh(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, tanf(x)); + return @as(f16, @floatCast(tanf(x))); } pub fn tanf(x: f32) callconv(.C) f32 { @@ -43,7 +43,7 @@ pub fn tanf(x: f32) callconv(.C) f32 { const t3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2 const t4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18 - var ix = @bitCast(u32, x); + var ix = @as(u32, @bitCast(x)); 
const sign = ix >> 31 != 0; ix &= 0x7fffffff; @@ -81,7 +81,7 @@ pub fn tanf(x: f32) callconv(.C) f32 { } pub fn tan(x: f64) callconv(.C) f64 { - var ix = @bitCast(u64, x) >> 32; + var ix = @as(u64, @bitCast(x)) >> 32; ix &= 0x7fffffff; // |x| ~< pi/4 @@ -106,12 +106,12 @@ pub fn tan(x: f64) callconv(.C) f64 { pub fn __tanx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, tanq(x)); + return @as(f80, @floatCast(tanq(x))); } pub fn tanq(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return tan(@floatCast(f64, x)); + return tan(@as(f64, @floatCast(x))); } pub fn tanl(x: c_longdouble) callconv(.C) c_longdouble { diff --git a/lib/compiler_rt/trig.zig b/lib/compiler_rt/trig.zig index 4a9629e5c012..375f70ddffb6 100644 --- a/lib/compiler_rt/trig.zig +++ b/lib/compiler_rt/trig.zig @@ -70,7 +70,7 @@ pub fn __cosdf(x: f64) f32 { const z = x * x; const w = z * z; const r = C2 + z * C3; - return @floatCast(f32, ((1.0 + z * C0) + w * C1) + (w * z) * r); + return @as(f32, @floatCast(((1.0 + z * C0) + w * C1) + (w * z) * r)); } /// kernel sin function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854 @@ -131,7 +131,7 @@ pub fn __sindf(x: f64) f32 { const w = z * z; const r = S3 + z * S4; const s = z * x; - return @floatCast(f32, (x + s * (S1 + z * S2)) + s * w * r); + return @as(f32, @floatCast((x + s * (S1 + z * S2)) + s * w * r)); } /// kernel tan function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854 @@ -199,7 +199,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { var hx: u32 = undefined; var sign: bool = undefined; - hx = @intCast(u32, @bitCast(u64, x) >> 32); + hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32)); const big = (hx & 0x7fffffff) >= 0x3FE59428; // |x| >= 0.6744 if (big) { sign = hx >> 31 != 0; @@ -222,7 +222,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { r = y + z * (s * (r + v) + y) + s * T[0]; w = x + r; if (big) { - s = 1 - 2 * @floatFromInt(f64, @intFromBool(odd)); + s = 1 - 2 * @as(f64, 
@floatFromInt(@intFromBool(odd))); v = s - 2.0 * (x + (r - w * w / (w + s))); return if (sign) -v else v; } @@ -231,11 +231,11 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 { } // -1.0/(x+r) has up to 2ulp error, so compute it accurately w0 = w; - w0 = @bitCast(f64, @bitCast(u64, w0) & 0xffffffff00000000); + w0 = @as(f64, @bitCast(@as(u64, @bitCast(w0)) & 0xffffffff00000000)); v = r - (w0 - x); // w0+v = r+x a = -1.0 / w; a0 = a; - a0 = @bitCast(f64, @bitCast(u64, a0) & 0xffffffff00000000); + a0 = @as(f64, @bitCast(@as(u64, @bitCast(a0)) & 0xffffffff00000000)); return a0 + a * (1.0 + a0 * w0 + a0 * v); } @@ -269,5 +269,5 @@ pub fn __tandf(x: f64, odd: bool) f32 { const s = z * x; const u = T[0] + z * T[1]; const r0 = (x + s * u) + (s * w) * (t + w * r); - return @floatCast(f32, if (odd) -1.0 / r0 else r0); + return @as(f32, @floatCast(if (odd) -1.0 / r0 else r0)); } diff --git a/lib/compiler_rt/trunc.zig b/lib/compiler_rt/trunc.zig index 8c66ba69e7f1..031f2eb65c20 100644 --- a/lib/compiler_rt/trunc.zig +++ b/lib/compiler_rt/trunc.zig @@ -27,12 +27,12 @@ comptime { pub fn __trunch(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, truncf(x)); + return @as(f16, @floatCast(truncf(x))); } pub fn truncf(x: f32) callconv(.C) f32 { - const u = @bitCast(u32, x); - var e = @intCast(i32, ((u >> 23) & 0xFF)) - 0x7F + 9; + const u = @as(u32, @bitCast(x)); + var e = @as(i32, @intCast(((u >> 23) & 0xFF))) - 0x7F + 9; var m: u32 = undefined; if (e >= 23 + 9) { @@ -42,18 +42,18 @@ pub fn truncf(x: f32) callconv(.C) f32 { e = 1; } - m = @as(u32, math.maxInt(u32)) >> @intCast(u5, e); + m = @as(u32, math.maxInt(u32)) >> @as(u5, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f32, u & ~m); + return @as(f32, @bitCast(u & ~m)); } } pub fn trunc(x: f64) callconv(.C) f64 { - const u = @bitCast(u64, x); - var e = @intCast(i32, ((u >> 52) & 0x7FF)) - 0x3FF + 12; + const u = @as(u64, 
@bitCast(x)); + var e = @as(i32, @intCast(((u >> 52) & 0x7FF))) - 0x3FF + 12; var m: u64 = undefined; if (e >= 52 + 12) { @@ -63,23 +63,23 @@ pub fn trunc(x: f64) callconv(.C) f64 { e = 1; } - m = @as(u64, math.maxInt(u64)) >> @intCast(u6, e); + m = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f64, u & ~m); + return @as(f64, @bitCast(u & ~m)); } } pub fn __truncx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, truncq(x)); + return @as(f80, @floatCast(truncq(x))); } pub fn truncq(x: f128) callconv(.C) f128 { - const u = @bitCast(u128, x); - var e = @intCast(i32, ((u >> 112) & 0x7FFF)) - 0x3FFF + 16; + const u = @as(u128, @bitCast(x)); + var e = @as(i32, @intCast(((u >> 112) & 0x7FFF))) - 0x3FFF + 16; var m: u128 = undefined; if (e >= 112 + 16) { @@ -89,12 +89,12 @@ pub fn truncq(x: f128) callconv(.C) f128 { e = 1; } - m = @as(u128, math.maxInt(u128)) >> @intCast(u7, e); + m = @as(u128, math.maxInt(u128)) >> @as(u7, @intCast(e)); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); - return @bitCast(f128, u & ~m); + return @as(f128, @bitCast(u & ~m)); } } diff --git a/lib/compiler_rt/truncdfhf2.zig b/lib/compiler_rt/truncdfhf2.zig index e76ad2ce62ef..ce849a8b9e58 100644 --- a/lib/compiler_rt/truncdfhf2.zig +++ b/lib/compiler_rt/truncdfhf2.zig @@ -12,9 +12,9 @@ comptime { } pub fn __truncdfhf2(a: f64) callconv(.C) common.F16T(f64) { - return @bitCast(common.F16T(f64), truncf(f16, f64, a)); + return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a))); } fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 { - return @bitCast(common.F16T(f64), truncf(f16, f64, a)); + return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a))); } diff --git a/lib/compiler_rt/truncf.zig b/lib/compiler_rt/truncf.zig index 3de342fc99d5..49c7cd11e1b8 100644 --- a/lib/compiler_rt/truncf.zig +++ b/lib/compiler_rt/truncf.zig @@ -38,7 +38,7 @@ 
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t const dstNaNCode = dstQNaN - 1; // Break a into a sign and representation of the absolute value - const aRep: src_rep_t = @bitCast(src_rep_t, a); + const aRep: src_rep_t = @as(src_rep_t, @bitCast(a)); const aAbs: src_rep_t = aRep & srcAbsMask; const sign: src_rep_t = aRep & srcSignMask; var absResult: dst_rep_t = undefined; @@ -47,7 +47,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t // The exponent of a is within the range of normal numbers in the // destination format. We can convert by simply right-shifting with // rounding and adjusting the exponent. - absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits)); + absResult = @as(dst_rep_t, @truncate(aAbs >> (srcSigBits - dstSigBits))); absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits; const roundBits: src_rep_t = aAbs & roundMask; @@ -62,18 +62,18 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t // a is NaN. // Conjure the result by beginning with infinity, setting the qNaN // bit and inserting the (truncated) trailing NaN field. - absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits; absResult |= dstQNaN; - absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode); + absResult |= @as(dst_rep_t, @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode)); } else if (aAbs >= overflow) { // a overflows to infinity. - absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits; } else { // a underflows on conversion to the destination type or is an exact // zero. The result may be a denormal or zero. Extract the exponent // to get the shift amount for the denormalization. 
- const aExp = @intCast(u32, aAbs >> srcSigBits); - const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1); + const aExp = @as(u32, @intCast(aAbs >> srcSigBits)); + const shift = @as(u32, @intCast(srcExpBias - dstExpBias - aExp + 1)); const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal; @@ -81,9 +81,9 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t if (shift > srcSigBits) { absResult = 0; } else { - const sticky: src_rep_t = @intFromBool(significand << @intCast(SrcShift, srcBits - shift) != 0); - const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky; - absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits)); + const sticky: src_rep_t = @intFromBool(significand << @as(SrcShift, @intCast(srcBits - shift)) != 0); + const denormalizedSignificand: src_rep_t = significand >> @as(SrcShift, @intCast(shift)) | sticky; + absResult = @as(dst_rep_t, @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits))); const roundBits: src_rep_t = denormalizedSignificand & roundMask; if (roundBits > halfway) { // Round to nearest @@ -96,8 +96,8 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t } const result: dst_rep_t align(@alignOf(dst_t)) = absResult | - @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits)); - return @bitCast(dst_t, result); + @as(dst_rep_t, @truncate(sign >> @as(SrcShift, @intCast(srcBits - dstBits)))); + return @as(dst_t, @bitCast(result)); } pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { @@ -133,7 +133,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { // destination format. We can convert by simply right-shifting with // rounding and adjusting the exponent. 
abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits; - abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits)); + abs_result |= @as(dst_rep_t, @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits))); abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits; const round_bits = a_rep.fraction & round_mask; @@ -148,12 +148,12 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { // a is NaN. // Conjure the result by beginning with infinity, setting the qNaN // bit and inserting the (truncated) trailing NaN field. - abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits; + abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits; abs_result |= dst_qnan; - abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask); + abs_result |= @as(dst_rep_t, @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask)); } else if (a_rep.exp >= overflow) { // a overflows to infinity. - abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits; + abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits; } else { // a underflows on conversion to the destination type or is an exact // zero. The result may be a denormal or zero. 
Extract the exponent @@ -164,9 +164,9 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { if (shift > src_sig_bits) { abs_result = 0; } else { - const sticky = @intFromBool(a_rep.fraction << @intCast(u6, shift) != 0); - const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky; - abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits)); + const sticky = @intFromBool(a_rep.fraction << @as(u6, @intCast(shift)) != 0); + const denormalized_significand = a_rep.fraction >> @as(u6, @intCast(shift)) | sticky; + abs_result = @as(dst_rep_t, @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits))); const round_bits = denormalized_significand & round_mask; if (round_bits > halfway) { // Round to nearest @@ -179,7 +179,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t { } const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16; - return @bitCast(dst_t, result); + return @as(dst_t, @bitCast(result)); } test { diff --git a/lib/compiler_rt/truncf_test.zig b/lib/compiler_rt/truncf_test.zig index d4e93cd11499..fd1ee9b38a3e 100644 --- a/lib/compiler_rt/truncf_test.zig +++ b/lib/compiler_rt/truncf_test.zig @@ -10,7 +10,7 @@ const __trunctfdf2 = @import("trunctfdf2.zig").__trunctfdf2; const __trunctfxf2 = @import("trunctfxf2.zig").__trunctfxf2; fn test__truncsfhf2(a: u32, expected: u16) !void { - const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a))); + const actual = @as(u16, @bitCast(__truncsfhf2(@as(f32, @bitCast(a))))); if (actual == expected) { return; @@ -73,7 +73,7 @@ test "truncsfhf2" { } fn test__truncdfhf2(a: f64, expected: u16) void { - const rep = @bitCast(u16, __truncdfhf2(a)); + const rep = @as(u16, @bitCast(__truncdfhf2(a))); if (rep == expected) { return; @@ -89,7 +89,7 @@ fn test__truncdfhf2(a: f64, expected: u16) void { } fn test__truncdfhf2_raw(a: u64, expected: u16) void { - const actual = @bitCast(u16, __truncdfhf2(@bitCast(f64, 
a))); + const actual = @as(u16, @bitCast(__truncdfhf2(@as(f64, @bitCast(a))))); if (actual == expected) { return; @@ -141,7 +141,7 @@ test "truncdfhf2" { fn test__trunctfsf2(a: f128, expected: u32) void { const x = __trunctfsf2(a); - const rep = @bitCast(u32, x); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { return; } @@ -157,11 +157,11 @@ fn test__trunctfsf2(a: f128, expected: u32) void { test "trunctfsf2" { // qnan - test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7fc00000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff800000000000 << 64))), 0x7fc00000); // nan - test__trunctfsf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7fc08000); // inf - test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7f800000); + test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7f800000); // zero test__trunctfsf2(0.0, 0x0); @@ -174,7 +174,7 @@ test "trunctfsf2" { fn test__trunctfdf2(a: f128, expected: u64) void { const x = __trunctfdf2(a); - const rep = @bitCast(u64, x); + const rep = @as(u64, @bitCast(x)); if (rep == expected) { return; } @@ -190,11 +190,11 @@ fn test__trunctfdf2(a: f128, expected: u64) void { test "trunctfdf2" { // qnan - test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7ff8000000000000); + test__trunctfdf2(@as(f128, @bitCast(@as(u128, 0x7fff800000000000 << 64))), 0x7ff8000000000000); // nan - test__trunctfdf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000); + test__trunctfdf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7ff8100000000000); // inf - test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7ff0000000000000); + 
test__trunctfdf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7ff0000000000000); // zero test__trunctfdf2(0.0, 0x0); @@ -207,7 +207,7 @@ test "trunctfdf2" { fn test__truncdfsf2(a: f64, expected: u32) void { const x = __truncdfsf2(a); - const rep = @bitCast(u32, x); + const rep = @as(u32, @bitCast(x)); if (rep == expected) { return; } @@ -225,11 +225,11 @@ fn test__truncdfsf2(a: f64, expected: u32) void { test "truncdfsf2" { // nan & qnan - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff8000000000000)), 0x7fc00000); - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000001)), 0x7fc00000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff8000000000000))), 0x7fc00000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000001))), 0x7fc00000); // inf - test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000000)), 0x7f800000); - test__truncdfsf2(@bitCast(f64, @as(u64, 0xfff0000000000000)), 0xff800000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000000))), 0x7f800000); + test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0xfff0000000000000))), 0xff800000); test__truncdfsf2(0.0, 0x0); test__truncdfsf2(1.0, 0x3f800000); @@ -242,7 +242,7 @@ test "truncdfsf2" { fn test__trunctfhf2(a: f128, expected: u16) void { const x = __trunctfhf2(a); - const rep = @bitCast(u16, x); + const rep = @as(u16, @bitCast(x)); if (rep == expected) { return; } @@ -254,12 +254,12 @@ fn test__trunctfhf2(a: f128, expected: u16) void { test "trunctfhf2" { // qNaN - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff8000000000000000000000000000)), 0x7e00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff8000000000000000000000000000))), 0x7e00); // NaN - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000001)), 0x7e00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000001))), 0x7e00); // inf - test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0x7c00); - test__trunctfhf2(-@bitCast(f128, 
@as(u128, 0x7fff0000000000000000000000000000)), 0xfc00); + test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0x7c00); + test__trunctfhf2(-@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0xfc00); // zero test__trunctfhf2(0.0, 0x0); test__trunctfhf2(-0.0, 0x8000); diff --git a/lib/compiler_rt/truncsfhf2.zig b/lib/compiler_rt/truncsfhf2.zig index 77dd0ba642d5..c747d8e37a90 100644 --- a/lib/compiler_rt/truncsfhf2.zig +++ b/lib/compiler_rt/truncsfhf2.zig @@ -13,13 +13,13 @@ comptime { } pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T(f32) { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T(f32) { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 { - return @bitCast(common.F16T(f32), truncf(f16, f32, a)); + return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a))); } diff --git a/lib/compiler_rt/trunctfhf2.zig b/lib/compiler_rt/trunctfhf2.zig index e9cc19da188b..9c7a3b6dba37 100644 --- a/lib/compiler_rt/trunctfhf2.zig +++ b/lib/compiler_rt/trunctfhf2.zig @@ -8,5 +8,5 @@ comptime { } pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T(f128) { - return @bitCast(common.F16T(f128), truncf(f16, f128, a)); + return @as(common.F16T(f128), @bitCast(truncf(f16, f128, a))); } diff --git a/lib/compiler_rt/trunctfxf2.zig b/lib/compiler_rt/trunctfxf2.zig index 018057f213ca..8478446b5139 100644 --- a/lib/compiler_rt/trunctfxf2.zig +++ b/lib/compiler_rt/trunctfxf2.zig @@ -25,7 +25,7 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 { const halfway = 1 << (src_sig_bits - dst_sig_bits - 1); // Break a into a sign and representation of the absolute value - const a_rep = @bitCast(u128, a); + const a_rep = @as(u128, @bitCast(a)); const a_abs = a_rep & src_abs_mask; const sign: u16 = if (a_rep 
& src_sign_mask != 0) 0x8000 else 0; const integer_bit = 1 << 63; @@ -38,13 +38,13 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 { // bit and inserting the (truncated) trailing NaN field. res.exp = 0x7fff; res.fraction = 0x8000000000000000; - res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)); + res.fraction |= @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits))); } else { // The exponent of a is within the range of normal numbers in the // destination format. We can convert by simply right-shifting with // rounding, adding the explicit integer bit, and adjusting the exponent - res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit; - res.exp = @truncate(u16, a_abs >> src_sig_bits); + res.fraction = @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits))) | integer_bit; + res.exp = @as(u16, @truncate(a_abs >> src_sig_bits)); const round_bits = a_abs & round_mask; if (round_bits > halfway) { diff --git a/lib/compiler_rt/truncxfhf2.zig b/lib/compiler_rt/truncxfhf2.zig index 31965d3e2a35..6dbeca7637b6 100644 --- a/lib/compiler_rt/truncxfhf2.zig +++ b/lib/compiler_rt/truncxfhf2.zig @@ -8,5 +8,5 @@ comptime { } fn __truncxfhf2(a: f80) callconv(.C) common.F16T(f80) { - return @bitCast(common.F16T(f80), trunc_f80(f16, a)); + return @as(common.F16T(f80), @bitCast(trunc_f80(f16, a))); } diff --git a/lib/compiler_rt/udivmod.zig b/lib/compiler_rt/udivmod.zig index a83ece8ada46..0e2a7d9ed1b3 100644 --- a/lib/compiler_rt/udivmod.zig +++ b/lib/compiler_rt/udivmod.zig @@ -21,11 +21,11 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T { var un64: T = undefined; var un10: T = undefined; - const s = @intCast(Log2Int(T), @clz(v)); + const s = @as(Log2Int(T), @intCast(@clz(v))); if (s > 0) { // Normalize divisor v <<= s; - un64 = (_u1 << s) | (_u0 >> @intCast(Log2Int(T), (@bitSizeOf(T) - @intCast(T, s)))); + un64 = (_u1 << s) | (_u0 >> @as(Log2Int(T), @intCast((@bitSizeOf(T) - @as(T, @intCast(s)))))); 
un10 = _u0 << s; } else { // Avoid undefined behavior of (u0 >> @bitSizeOf(T)) @@ -101,8 +101,8 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { return 0; } - var a = @bitCast([2]HalfT, a_); - var b = @bitCast([2]HalfT, b_); + var a = @as([2]HalfT, @bitCast(a_)); + var b = @as([2]HalfT, @bitCast(b_)); var q: [2]HalfT = undefined; var r: [2]HalfT = undefined; @@ -119,16 +119,16 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { q[lo] = divwide(HalfT, a[hi] % b[lo], a[lo], b[lo], &r[lo]); } if (maybe_rem) |rem| { - rem.* = @bitCast(T, r); + rem.* = @as(T, @bitCast(r)); } - return @bitCast(T, q); + return @as(T, @bitCast(q)); } // 0 <= shift <= 63 var shift: Log2Int(T) = @clz(b[hi]) - @clz(a[hi]); - var af = @bitCast(T, a); - var bf = @bitCast(T, b) << shift; - q = @bitCast([2]HalfT, @as(T, 0)); + var af = @as(T, @bitCast(a)); + var bf = @as(T, @bitCast(b)) << shift; + q = @as([2]HalfT, @bitCast(@as(T, 0))); for (0..shift + 1) |_| { q[lo] <<= 1; @@ -137,13 +137,13 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T { // af -= bf; // q[lo] |= 1; // } - const s = @bitCast(SignedT, bf -% af -% 1) >> (@bitSizeOf(T) - 1); - q[lo] |= @intCast(HalfT, s & 1); - af -= bf & @bitCast(T, s); + const s = @as(SignedT, @bitCast(bf -% af -% 1)) >> (@bitSizeOf(T) - 1); + q[lo] |= @as(HalfT, @intCast(s & 1)); + af -= bf & @as(T, @bitCast(s)); bf >>= 1; } if (maybe_rem) |rem| { - rem.* = @bitCast(T, af); + rem.* = @as(T, @bitCast(af)); } - return @bitCast(T, q); + return @as(T, @bitCast(q)); } diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig index 354a926b816b..f222c13a4c00 100644 --- a/lib/compiler_rt/udivmodei4.zig +++ b/lib/compiler_rt/udivmodei4.zig @@ -83,23 +83,23 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void { i = 0; while (i <= n) : (i += 1) { const p = qhat * limb(&vn, i); - const t = limb(&un, i + j) - carry - @truncate(u32, p); - limb_set(&un, i + j, @truncate(u32, 
@bitCast(u64, t))); - carry = @intCast(i64, p >> 32) - @intCast(i64, t >> 32); + const t = limb(&un, i + j) - carry - @as(u32, @truncate(p)); + limb_set(&un, i + j, @as(u32, @truncate(@as(u64, @bitCast(t))))); + carry = @as(i64, @intCast(p >> 32)) - @as(i64, @intCast(t >> 32)); } const t = limb(&un, j + n + 1) -% carry; - limb_set(&un, j + n + 1, @truncate(u32, @bitCast(u64, t))); - if (q) |q_| limb_set(q_, j, @truncate(u32, qhat)); + limb_set(&un, j + n + 1, @as(u32, @truncate(@as(u64, @bitCast(t))))); + if (q) |q_| limb_set(q_, j, @as(u32, @truncate(qhat))); if (t < 0) { if (q) |q_| limb_set(q_, j, limb(q_, j) - 1); var carry2: u64 = 0; i = 0; while (i <= n) : (i += 1) { const t2 = @as(u64, limb(&un, i + j)) + @as(u64, limb(&vn, i)) + carry2; - limb_set(&un, i + j, @truncate(u32, t2)); + limb_set(&un, i + j, @as(u32, @truncate(t2))); carry2 = t2 >> 32; } - limb_set(&un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2)); + limb_set(&un, j + n + 1, @as(u32, @truncate(limb(&un, j + n + 1) + carry2))); } if (j == 0) break; } diff --git a/lib/compiler_rt/udivmodti4.zig b/lib/compiler_rt/udivmodti4.zig index 29523fc6e895..8f4748fa7de2 100644 --- a/lib/compiler_rt/udivmodti4.zig +++ b/lib/compiler_rt/udivmodti4.zig @@ -20,7 +20,7 @@ pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 { - return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem)); + return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), maybe_rem))); } test { diff --git a/lib/compiler_rt/udivti3.zig b/lib/compiler_rt/udivti3.zig index 748e9b6599cf..1205d6533650 100644 --- a/lib/compiler_rt/udivti3.zig +++ b/lib/compiler_rt/udivti3.zig @@ -20,5 +20,5 @@ pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 { const v2u64 = @Vector(2, u64); fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) 
callconv(.C) v2u64 { - return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null)); + return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), null))); } diff --git a/lib/compiler_rt/umodti3.zig b/lib/compiler_rt/umodti3.zig index 097f9a38558a..41de97d2bb49 100644 --- a/lib/compiler_rt/umodti3.zig +++ b/lib/compiler_rt/umodti3.zig @@ -23,6 +23,6 @@ const v2u64 = @Vector(2, u64); fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 { var r: u128 = undefined; - _ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r); - return @bitCast(v2u64, r); + _ = udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), &r); + return @as(v2u64, @bitCast(r)); } diff --git a/lib/ssp.zig b/lib/ssp.zig index f75c4d1a55ba..4f8eba567fb1 100644 --- a/lib/ssp.zig +++ b/lib/ssp.zig @@ -46,7 +46,7 @@ export var __stack_chk_guard: usize = blk: { var buf = [1]u8{0} ** @sizeOf(usize); buf[@sizeOf(usize) - 1] = 255; buf[@sizeOf(usize) - 2] = '\n'; - break :blk @bitCast(usize, buf); + break :blk @as(usize, @bitCast(buf)); }; export fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index c569e0074a6a..a411ddc500cf 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1111,7 +1111,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros var populated_cpu_features = whitelist_cpu.model.features; populated_cpu_features.populateDependencies(all_features); for (all_features, 0..) |feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); if (in_cpu_set) { log.err("{s} ", .{feature.name}); @@ -1119,7 +1119,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros } log.err(" Remove: ", .{}); for (all_features, 0..) 
|feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = selected_cpu.features.isEnabled(i); if (in_actual_set and !in_cpu_set) { @@ -1442,13 +1442,13 @@ pub fn execAllowFail( switch (term) { .Exited => |code| { if (code != 0) { - out_code.* = @truncate(u8, code); + out_code.* = @as(u8, @truncate(code)); return error.ExitCodeFailure; } return stdout; }, .Signal, .Stopped, .Unknown => |code| { - out_code.* = @truncate(u8, code); + out_code.* = @as(u8, @truncate(code)); return error.ProcessTerminated; }, } @@ -1815,7 +1815,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 { try mcpu_buffer.appendSlice(cpu.model.name); for (all_features, 0..) |feature, i_usize| { - const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize); + const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = cpu.features.isEnabled(i); if (in_cpu_set and !in_actual_set) { @@ -1852,7 +1852,7 @@ pub fn hex64(x: u64) [16]u8 { var result: [16]u8 = undefined; var i: usize = 0; while (i < 8) : (i += 1) { - const byte = @truncate(u8, x >> @intCast(u6, 8 * i)); + const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i)))); result[i * 2 + 0] = hex_charset[byte >> 4]; result[i * 2 + 1] = hex_charset[byte & 15]; } diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 3b7f180ae82f..b0db88692cda 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -128,7 +128,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath { const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]); gpa.free(resolved_path); return PrefixedPath{ - .prefix = @intCast(u8, i), + .prefix = @as(u8, @intCast(i)), .sub_path = sub_path, }; } @@ -653,7 +653,7 @@ pub const Manifest = 
struct { return error.FileTooBig; } - const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size)); + const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size))); errdefer self.cache.gpa.free(contents); // Hash while reading from disk, to keep the contents in the cpu cache while diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index a0d7a6a296ff..f21ef8bc8f7b 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -355,7 +355,7 @@ pub fn evalZigProcess( }, .error_bundle => { const EbHdr = std.zig.Server.Message.ErrorBundle; - const eb_hdr = @ptrCast(*align(1) const EbHdr, body); + const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body)); const extra_bytes = body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len]; const string_bytes = @@ -377,7 +377,7 @@ pub fn evalZigProcess( }, .emit_bin_path => { const EbpHdr = std.zig.Server.Message.EmitBinPath; - const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body); + const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body)); s.result_cached = ebp_hdr.flags.cache_hit; result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]); }, diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index 1c2d86e4e36a..171734c45063 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -449,9 +449,9 @@ const MachODumper = struct { }, .SYMTAB => if (opts.dump_symtab) { const lc = cmd.cast(macho.symtab_command).?; - symtab = @ptrCast( + symtab = @as( [*]const macho.nlist_64, - @alignCast(@alignOf(macho.nlist_64), &bytes[lc.symoff]), + @ptrCast(@alignCast(&bytes[lc.symoff])), )[0..lc.nsyms]; strtab = bytes[lc.stroff..][0..lc.strsize]; }, @@ -474,7 +474,7 @@ const MachODumper = struct { try writer.print("{s}\n", .{symtab_label}); for (symtab) |sym| { if (sym.stab()) continue; - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const 
u8, @ptrCast(strtab.ptr + sym.n_strx)), 0); if (sym.sect()) { const sect = sections.items[sym.n_sect - 1]; try writer.print("{x} ({s},{s})", .{ @@ -487,7 +487,7 @@ const MachODumper = struct { } try writer.print(" {s}\n", .{sym_name}); } else if (sym.undf()) { - const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); const import_name = blk: { if (ordinal <= 0) { if (ordinal == macho.BIND_SPECIAL_DYLIB_SELF) @@ -498,7 +498,7 @@ const MachODumper = struct { break :blk "flat lookup"; unreachable; } - const full_path = imports.items[@bitCast(u16, ordinal) - 1]; + const full_path = imports.items[@as(u16, @bitCast(ordinal)) - 1]; const basename = fs.path.basename(full_path); assert(basename.len > 0); const ext = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len; @@ -950,8 +950,8 @@ const WasmDumper = struct { switch (opcode) { .i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}), .i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readILEB128(i64, reader)}), - .f32_const => try writer.print("f32.const {x}\n", .{@bitCast(f32, try reader.readIntLittle(u32))}), - .f64_const => try writer.print("f64.const {x}\n", .{@bitCast(f64, try reader.readIntLittle(u64))}), + .f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readIntLittle(u32)))}), + .f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readIntLittle(u64)))}), .global_get => try writer.print("global.get {x}\n", .{try std.leb.readULEB128(u32, reader)}), else => unreachable, } diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 89576c15faba..58973d08d077 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -321,7 +321,7 @@ pub const BuildId = union(enum) { pub fn initHexString(bytes: []const u8) BuildId { var result: BuildId = .{ 
.hexstring = .{ .bytes = undefined, - .len = @intCast(u8, bytes.len), + .len = @as(u8, @intCast(bytes.len)), } }; @memcpy(result.hexstring.bytes[0..bytes.len], bytes); return result; @@ -342,7 +342,7 @@ pub const BuildId = union(enum) { } else if (mem.startsWith(u8, text, "0x")) { var result: BuildId = .{ .hexstring = undefined }; const slice = try std.fmt.hexToBytes(&result.hexstring.bytes, text[2..]); - result.hexstring.len = @intCast(u8, slice.len); + result.hexstring.len = @as(u8, @intCast(slice.len)); return result; } return error.InvalidBuildIdStyle; @@ -2059,7 +2059,7 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 { const file = fs.cwd().openFile(path_file, .{}) catch return null; defer file.close(); - const size = @intCast(usize, try file.getEndPos()); + const size = @as(usize, @intCast(try file.getEndPos())); const vcpkg_path = try allocator.alloc(u8, size); const size_read = try file.read(vcpkg_path); std.debug.assert(size == size_read); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index c574dbb5af55..3d8187330810 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -998,7 +998,7 @@ fn evalZigTest( }, .test_metadata => { const TmHdr = std.zig.Server.Message.TestMetadata; - const tm_hdr = @ptrCast(*align(1) const TmHdr, body); + const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body)); test_count = tm_hdr.tests_len; const names_bytes = body[@sizeOf(TmHdr)..][0 .. 
test_count * @sizeOf(u32)]; @@ -1034,7 +1034,7 @@ fn evalZigTest( const md = metadata.?; const TrHdr = std.zig.Server.Message.TestResults; - const tr_hdr = @ptrCast(*align(1) const TrHdr, body); + const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body)); fail_count += @intFromBool(tr_hdr.flags.fail); skip_count += @intFromBool(tr_hdr.flags.skip); leak_count += @intFromBool(tr_hdr.flags.leak); diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index e3c5fc20dd0e..e0bb28569d73 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -232,14 +232,14 @@ fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void { } var cursor_pos = windows.COORD{ - .X = info.dwCursorPosition.X - @intCast(windows.SHORT, p.columns_written), + .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)), .Y = info.dwCursorPosition.Y, }; if (cursor_pos.X < 0) cursor_pos.X = 0; - const fill_chars = @intCast(windows.DWORD, info.dwSize.X - cursor_pos.X); + const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); var written: windows.DWORD = undefined; if (windows.kernel32.FillConsoleOutputAttribute( diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index f16f8a9a79b8..a3b469ad6f15 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -66,7 +66,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void { if (self.getHandle() == std.c.pthread_self()) { // Set the name of the calling thread (no thread id required). const err = try os.prctl(.SET_NAME, .{@intFromPtr(name_with_terminator.ptr)}); - switch (@enumFromInt(os.E, err)) { + switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return, else => |e| return os.unexpectedErrno(e), } @@ -176,7 +176,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co if (self.getHandle() == std.c.pthread_self()) { // Get the name of the calling thread (no thread id required). 
const err = try os.prctl(.GET_NAME, .{@intFromPtr(buffer.ptr)}); - switch (@enumFromInt(os.E, err)) { + switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return std.mem.sliceTo(buffer, 0), else => |e| return os.unexpectedErrno(e), } @@ -211,7 +211,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co null, )) { .SUCCESS => { - const string = @ptrCast(*const os.windows.UNICODE_STRING, &buf); + const string = @as(*const os.windows.UNICODE_STRING, @ptrCast(&buf)); const len = try std.unicode.utf16leToUtf8(buffer, string.Buffer[0 .. string.Length / 2]); return if (len > 0) buffer[0..len] else null; }, @@ -510,7 +510,7 @@ const WindowsThreadImpl = struct { thread: ThreadCompletion, fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD { - const self = @ptrCast(*@This(), @alignCast(@alignOf(@This()), raw_ptr)); + const self: *@This() = @ptrCast(@alignCast(raw_ptr)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, @@ -525,7 +525,7 @@ const WindowsThreadImpl = struct { const alloc_ptr = windows.kernel32.HeapAlloc(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory; errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); - const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes]; + const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes]; var fba = std.heap.FixedBufferAllocator.init(instance_bytes); const instance = fba.allocator().create(Instance) catch unreachable; instance.* = .{ @@ -547,7 +547,7 @@ const WindowsThreadImpl = struct { null, stack_size, Instance.entryFn, - @ptrCast(*anyopaque, instance), + @as(*anyopaque, @ptrCast(instance)), 0, null, ) orelse { @@ -596,19 +596,19 @@ const PosixThreadImpl = struct { return thread_id; }, .dragonfly => { - return @bitCast(u32, c.lwp_gettid()); + return @as(u32, @bitCast(c.lwp_gettid())); }, .netbsd => { - return @bitCast(u32, c._lwp_self()); + return @as(u32, 
@bitCast(c._lwp_self())); }, .freebsd => { - return @bitCast(u32, c.pthread_getthreadid_np()); + return @as(u32, @bitCast(c.pthread_getthreadid_np())); }, .openbsd => { - return @bitCast(u32, c.getthrid()); + return @as(u32, @bitCast(c.getthrid())); }, .haiku => { - return @bitCast(u32, c.find_thread(null)); + return @as(u32, @bitCast(c.find_thread(null))); }, else => { return @intFromPtr(c.pthread_self()); @@ -629,7 +629,7 @@ const PosixThreadImpl = struct { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; - return @intCast(usize, count); + return @as(usize, @intCast(count)); }, .solaris => { // The "proper" way to get the cpu count would be to query @@ -637,7 +637,7 @@ const PosixThreadImpl = struct { // cpu. const rc = c.sysconf(os._SC.NPROCESSORS_ONLN); return switch (os.errno(rc)) { - .SUCCESS => @intCast(usize, rc), + .SUCCESS => @as(usize, @intCast(rc)), else => |err| os.unexpectedErrno(err), }; }, @@ -645,7 +645,7 @@ const PosixThreadImpl = struct { var system_info: os.system.system_info = undefined; const rc = os.system.get_system_info(&system_info); // always returns B_OK return switch (os.errno(rc)) { - .SUCCESS => @intCast(usize, system_info.cpu_count), + .SUCCESS => @as(usize, @intCast(system_info.cpu_count)), else => |err| os.unexpectedErrno(err), }; }, @@ -657,7 +657,7 @@ const PosixThreadImpl = struct { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; - return @intCast(usize, count); + return @as(usize, @intCast(count)); }, } } @@ -675,7 +675,7 @@ const PosixThreadImpl = struct { return callFn(f, @as(Args, undefined)); } - const args_ptr = @ptrCast(*Args, @alignCast(@alignOf(Args), raw_arg)); + const args_ptr: *Args = @ptrCast(@alignCast(raw_arg)); defer allocator.destroy(args_ptr); return callFn(f, args_ptr.*); } @@ -699,7 +699,7 @@ const PosixThreadImpl = struct { &handle, &attr, Instance.entryFn, - if (@sizeOf(Args) > 1) @ptrCast(*anyopaque, args_ptr) else undefined, + if (@sizeOf(Args) > 
1) @as(*anyopaque, @ptrCast(args_ptr)) else undefined, )) { .SUCCESS => return Impl{ .handle = handle }, .AGAIN => return error.SystemResources, @@ -742,7 +742,7 @@ const LinuxThreadImpl = struct { fn getCurrentId() Id { return tls_thread_id orelse { - const tid = @bitCast(u32, linux.gettid()); + const tid = @as(u32, @bitCast(linux.gettid())); tls_thread_id = tid; return tid; }; @@ -911,7 +911,7 @@ const LinuxThreadImpl = struct { thread: ThreadCompletion, fn entryFn(raw_arg: usize) callconv(.C) u8 { - const self = @ptrFromInt(*@This(), raw_arg); + const self = @as(*@This(), @ptrFromInt(raw_arg)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, @@ -969,7 +969,7 @@ const LinuxThreadImpl = struct { // map everything but the guard page as read/write os.mprotect( - @alignCast(page_size, mapped[guard_offset..]), + @alignCast(mapped[guard_offset..]), os.PROT.READ | os.PROT.WRITE, ) catch |err| switch (err) { error.AccessDenied => unreachable, @@ -994,7 +994,7 @@ const LinuxThreadImpl = struct { }; } - const instance = @ptrCast(*Instance, @alignCast(@alignOf(Instance), &mapped[instance_offset])); + const instance: *Instance = @ptrCast(@alignCast(&mapped[instance_offset])); instance.* = .{ .fn_args = args, .thread = .{ .mapped = mapped }, diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig index 61e39eba27d5..768442539b84 100644 --- a/lib/std/Thread/Futex.zig +++ b/lib/std/Thread/Futex.zig @@ -128,14 +128,14 @@ const WindowsImpl = struct { // NTDLL functions work with time in units of 100 nanoseconds. // Positive values are absolute deadlines while negative values are relative durations. 
if (timeout) |delay| { - timeout_value = @intCast(os.windows.LARGE_INTEGER, delay / 100); + timeout_value = @as(os.windows.LARGE_INTEGER, @intCast(delay / 100)); timeout_value = -timeout_value; timeout_ptr = &timeout_value; } const rc = os.windows.ntdll.RtlWaitOnAddress( - @ptrCast(?*const anyopaque, ptr), - @ptrCast(?*const anyopaque, &expect), + @as(?*const anyopaque, @ptrCast(ptr)), + @as(?*const anyopaque, @ptrCast(&expect)), @sizeOf(@TypeOf(expect)), timeout_ptr, ); @@ -151,7 +151,7 @@ const WindowsImpl = struct { } fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { - const address = @ptrCast(?*const anyopaque, ptr); + const address = @as(?*const anyopaque, @ptrCast(ptr)); assert(max_waiters != 0); switch (max_waiters) { @@ -186,7 +186,7 @@ const DarwinImpl = struct { // true so that we we know to ignore the ETIMEDOUT result. var timeout_overflowed = false; - const addr = @ptrCast(*const anyopaque, ptr); + const addr = @as(*const anyopaque, @ptrCast(ptr)); const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO; const status = blk: { if (supports_ulock_wait2) { @@ -202,7 +202,7 @@ const DarwinImpl = struct { }; if (status >= 0) return; - switch (@enumFromInt(std.os.E, -status)) { + switch (@as(std.os.E, @enumFromInt(-status))) { // Wait was interrupted by the OS or other spurious signalling. .INTR => {}, // Address of the futex was paged out. 
This is unlikely, but possible in theory, and @@ -225,11 +225,11 @@ const DarwinImpl = struct { } while (true) { - const addr = @ptrCast(*const anyopaque, ptr); + const addr = @as(*const anyopaque, @ptrCast(ptr)); const status = os.darwin.__ulock_wake(flags, addr, 0); if (status >= 0) return; - switch (@enumFromInt(std.os.E, -status)) { + switch (@as(std.os.E, @enumFromInt(-status))) { .INTR => continue, // spurious wake() .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t .NOENT => return, // nothing was woken up @@ -245,14 +245,14 @@ const LinuxImpl = struct { fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { - ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.linux.futex_wait( - @ptrCast(*const i32, &ptr.value), + @as(*const i32, @ptrCast(&ptr.value)), os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT, - @bitCast(i32, expect), + @as(i32, @bitCast(expect)), if (timeout != null) &ts else null, ); @@ -272,7 +272,7 @@ const LinuxImpl = struct { fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { const rc = os.linux.futex_wake( - @ptrCast(*const i32, &ptr.value), + @as(*const i32, @ptrCast(&ptr.value)), os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE, std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32), ); @@ -299,8 +299,8 @@ const FreebsdImpl = struct { tm._flags = 0; // use relative time not UMTX_ABSTIME tm._clockid = os.CLOCK.MONOTONIC; - tm._timeout.tv_sec = @intCast(@TypeOf(tm._timeout.tv_sec), timeout_ns / std.time.ns_per_s); - tm._timeout.tv_nsec = @intCast(@TypeOf(tm._timeout.tv_nsec), timeout_ns % std.time.ns_per_s); + 
tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.freebsd._umtx_op( @@ -347,14 +347,14 @@ const OpenbsdImpl = struct { fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { - ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } const rc = os.openbsd.futex( - @ptrCast(*const volatile u32, &ptr.value), + @as(*const volatile u32, @ptrCast(&ptr.value)), os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG, - @bitCast(c_int, expect), + @as(c_int, @bitCast(expect)), if (timeout != null) &ts else null, null, // FUTEX_WAIT takes no requeue address ); @@ -377,7 +377,7 @@ const OpenbsdImpl = struct { fn wake(ptr: *const Atomic(u32), max_waiters: u32) void { const rc = os.openbsd.futex( - @ptrCast(*const volatile u32, &ptr.value), + @as(*const volatile u32, @ptrCast(&ptr.value)), os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG, std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int), null, // FUTEX_WAKE takes no timeout ptr @@ -411,8 +411,8 @@ const DragonflyImpl = struct { } } - const value = @bitCast(c_int, expect); - const addr = @ptrCast(*const volatile c_int, &ptr.value); + const value = @as(c_int, @bitCast(expect)); + const addr = @as(*const volatile c_int, @ptrCast(&ptr.value)); const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us); switch (os.errno(rc)) { @@ -441,7 +441,7 @@ const DragonflyImpl = struct { // https://man.dragonflybsd.org/?command=umtx§ion=2 // > umtx_wakeup() will generally return 0 unless the address is bad. 
// We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore) - const addr = @ptrCast(*const volatile c_int, &ptr.value); + const addr = @as(*const volatile c_int, @ptrCast(&ptr.value)); _ = os.dragonfly.umtx_wakeup(addr, to_wake); } }; @@ -488,8 +488,8 @@ const PosixImpl = struct { var ts: os.timespec = undefined; if (timeout) |timeout_ns| { os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable; - ts.tv_sec +|= @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s); - ts.tv_nsec += @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s); + ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); + ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); if (ts.tv_nsec >= std.time.ns_per_s) { ts.tv_sec +|= 1; diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig index 9114caaa1209..0f618516b552 100644 --- a/lib/std/Thread/Mutex.zig +++ b/lib/std/Thread/Mutex.zig @@ -242,12 +242,12 @@ const NonAtomicCounter = struct { value: [2]u64 = [_]u64{ 0, 0 }, fn get(self: NonAtomicCounter) u128 { - return @bitCast(u128, self.value); + return @as(u128, @bitCast(self.value)); } fn inc(self: *NonAtomicCounter) void { - for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| { - @ptrCast(*volatile u64, &self.value[i]).* = v; + for (@as([2]u64, @bitCast(self.get() + 1)), 0..) |v, i| { + @as(*volatile u64, @ptrCast(&self.value[i])).* = v; } } }; diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index d3ad94324ec9..df4c95cbcafb 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -49,7 +49,7 @@ pub fn eqlString(a: []const u8, b: []const u8) bool { } pub fn hashString(s: []const u8) u32 { - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return @as(u32, @truncate(std.hash.Wyhash.hash(0, s))); } /// Insertion order is preserved. 
@@ -617,7 +617,7 @@ pub fn ArrayHashMapUnmanaged( return .{ .keys = slice.items(.key).ptr, .values = slice.items(.value).ptr, - .len = @intCast(u32, slice.len), + .len = @as(u32, @intCast(slice.len)), }; } pub const Iterator = struct { @@ -1409,7 +1409,7 @@ pub fn ArrayHashMapUnmanaged( indexes: []Index(I), ) void { const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes); - indexes[slot].entry_index = @intCast(I, new_entry_index); + indexes[slot].entry_index = @as(I, @intCast(new_entry_index)); } fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void { @@ -1508,7 +1508,7 @@ pub fn ArrayHashMapUnmanaged( const new_index = self.entries.addOneAssumeCapacity(); indexes[slot] = .{ .distance_from_start_index = distance_from_start_index, - .entry_index = @intCast(I, new_index), + .entry_index = @as(I, @intCast(new_index)), }; // update the hash if applicable @@ -1549,7 +1549,7 @@ pub fn ArrayHashMapUnmanaged( const new_index = self.entries.addOneAssumeCapacity(); if (store_hash) hashes_array.ptr[new_index] = h; indexes[slot] = .{ - .entry_index = @intCast(I, new_index), + .entry_index = @as(I, @intCast(new_index)), .distance_from_start_index = distance_from_start_index, }; distance_from_start_index = slot_data.distance_from_start_index; @@ -1639,7 +1639,7 @@ pub fn ArrayHashMapUnmanaged( const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; - var entry_index = @intCast(I, i); + var entry_index = @as(I, @intCast(i)); var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; @@ -1776,7 +1776,7 @@ fn capacityIndexSize(bit_index: u8) usize { fn safeTruncate(comptime T: type, val: anytype) T { if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val))) return val; - return @truncate(T, val); + return @as(T, @truncate(val)); } /// A single entry in the lookup acceleration structure. 
These structs @@ -1852,13 +1852,13 @@ const IndexHeader = struct { fn constrainIndex(header: IndexHeader, i: usize) usize { // This is an optimization for modulo of power of two integers; // it requires `indexes_len` to always be a power of two. - return @intCast(usize, i & header.mask()); + return @as(usize, @intCast(i & header.mask())); } /// Returns the attached array of indexes. I must match the type /// returned by capacityIndexType. fn indexes(header: *IndexHeader, comptime I: type) []Index(I) { - const start_ptr = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader)); + const start_ptr: [*]Index(I) = @alignCast(@ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader))); return start_ptr[0..header.length()]; } @@ -1871,15 +1871,15 @@ const IndexHeader = struct { return index_capacities[self.bit_index]; } fn length(self: IndexHeader) usize { - return @as(usize, 1) << @intCast(math.Log2Int(usize), self.bit_index); + return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index)); } fn mask(self: IndexHeader) u32 { - return @intCast(u32, self.length() - 1); + return @as(u32, @intCast(self.length() - 1)); } fn findBitIndex(desired_capacity: usize) !u8 { if (desired_capacity > max_capacity) return error.OutOfMemory; - var new_bit_index = @intCast(u8, std.math.log2_int_ceil(usize, desired_capacity)); + var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity))); if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1; if (new_bit_index < min_bit_index) new_bit_index = min_bit_index; assert(desired_capacity <= index_capacities[new_bit_index]); @@ -1889,12 +1889,12 @@ const IndexHeader = struct { /// Allocates an index header, and fills the entryIndexes array with empty. /// The distance array contents are undefined. 
fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader { - const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index); + const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index)); const index_size = hash_map.capacityIndexSize(new_bit_index); const nbytes = @sizeOf(IndexHeader) + index_size * len; const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes); @memset(bytes[@sizeOf(IndexHeader)..], 0xff); - const result = @ptrCast(*IndexHeader, bytes.ptr); + const result: *IndexHeader = @alignCast(@ptrCast(bytes.ptr)); result.* = .{ .bit_index = new_bit_index, }; @@ -1904,7 +1904,7 @@ const IndexHeader = struct { /// Releases the memory for a header and its associated arrays. fn free(header: *IndexHeader, allocator: Allocator) void { const index_size = hash_map.capacityIndexSize(header.bit_index); - const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header); + const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header); const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size]; allocator.free(slice); } @@ -1912,7 +1912,7 @@ const IndexHeader = struct { /// Puts an IndexHeader into the state that it would be in after being freshly allocated. fn reset(header: *IndexHeader) void { const index_size = hash_map.capacityIndexSize(header.bit_index); - const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header); + const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header); const nbytes = @sizeOf(IndexHeader) + header.length() * index_size; @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff); } @@ -2020,25 +2020,25 @@ test "iterator hash map" { var count: usize = 0; while (it.next()) |entry| : (count += 1) { - buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*; + buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; } try testing.expect(count == 3); try testing.expect(it.next() == null); for (buffer, 0..) 
|_, i| { - try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]); + try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); count = 0; while (it.next()) |entry| { - buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*; + buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; count += 1; if (count >= 2) break; } for (buffer[0..2], 0..) |_, i| { - try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]); + try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); @@ -2336,11 +2336,11 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) fn hash(ctx: Context, key: K) u32 { _ = ctx; if (comptime trait.hasUniqueRepresentation(K)) { - return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key))); + return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key)))); } else { var hasher = Wyhash.init(0); autoHash(&hasher, key); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } } }.hash; @@ -2380,7 +2380,7 @@ pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime str _ = ctx; var hasher = Wyhash.init(0); std.hash.autoHashStrat(&hasher, key, strategy); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } }.hash; } diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index c2a2486dfa9b..8f3458481c72 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -1123,19 +1123,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" { { var i: usize = 0; while (i < 10) : (i += 1) { - list.append(@intCast(i32, i + 1)) catch unreachable; + list.append(@as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { - try testing.expect(list.items[i] == @intCast(i32, i + 1)); + try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) 
|v, i| { - try testing.expect(v == @intCast(i32, i + 1)); + try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); @@ -1173,19 +1173,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" { { var i: usize = 0; while (i < 10) : (i += 1) { - list.append(a, @intCast(i32, i + 1)) catch unreachable; + list.append(a, @as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { - try testing.expect(list.items[i] == @intCast(i32, i + 1)); + try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) |v, i| { - try testing.expect(v == @intCast(i32, i + 1)); + try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig index c3f17421f338..b9e1b18f771d 100644 --- a/lib/std/atomic/Atomic.zig +++ b/lib/std/atomic/Atomic.zig @@ -46,7 +46,7 @@ pub fn Atomic(comptime T: type) type { extern "c" fn __tsan_release(addr: *anyopaque) void; }; - const addr = @ptrCast(*anyopaque, self); + const addr = @as(*anyopaque, @ptrCast(self)); return switch (ordering) { .Unordered, .Monotonic => @compileError(@tagName(ordering) ++ " only applies to atomic loads and stores"), .Acquire => tsan.__tsan_acquire(addr), @@ -307,7 +307,7 @@ pub fn Atomic(comptime T: type) type { // TODO: emit appropriate tsan fence if compiling with tsan _ = ordering; - return @intCast(u1, old_bit); + return @as(u1, @intCast(old_bit)); } }); }; @@ -392,8 +392,8 @@ test "Atomic.swap" { try testing.expectEqual(a.load(.SeqCst), true); var b = Atomic(?*u8).init(null); - try testing.expectEqual(b.swap(@ptrFromInt(?*u8, @alignOf(u8)), ordering), null); - try testing.expectEqual(b.load(.SeqCst), @ptrFromInt(?*u8, @alignOf(u8))); + try testing.expectEqual(b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), ordering), null); + try testing.expectEqual(b.load(.SeqCst), @as(?*u8, @ptrFromInt(@alignOf(u8)))); } } @@ -544,7 +544,7 @@ test 
"Atomic.bitSet" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // setting the bit should change the bit @@ -558,7 +558,7 @@ test "Atomic.bitSet" { // all the previous bits should have not changed (still be set) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask != 0); } @@ -573,7 +573,7 @@ test "Atomic.bitReset" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; x.storeUnchecked(x.loadUnchecked() | mask); @@ -588,7 +588,7 @@ test "Atomic.bitReset" { // all the previous bits should have not changed (still be reset) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } @@ -603,7 +603,7 @@ test "Atomic.bitToggle" { var x = Atomic(Int).init(0); for (0..@bitSizeOf(Int)) |bit_index| { - const bit = @intCast(std.math.Log2Int(Int), bit_index); + const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // toggling the bit should change the bit @@ -617,7 +617,7 @@ test "Atomic.bitToggle" { // all the previous bits should have not changed (still be toggled back) for (0..bit_index) |prev_bit_index| { - const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index); + const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = 
@as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index 70cb293cf43b..78eb7463474e 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -248,7 +248,7 @@ fn startPuts(ctx: *Context) u8 { const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz - const x = @bitCast(i32, random.int(u32)); + const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Queue(i32).Node) catch unreachable; node.* = .{ .prev = undefined, diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig index 9ad7c76d81e0..12892176524f 100644 --- a/lib/std/atomic/stack.zig +++ b/lib/std/atomic/stack.zig @@ -151,7 +151,7 @@ fn startPuts(ctx: *Context) u8 { const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz - const x = @bitCast(i32, random.int(u32)); + const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Stack(i32).Node) catch unreachable; node.* = Stack(i32).Node{ .next = undefined, diff --git a/lib/std/base64.zig b/lib/std/base64.zig index 869fa47e5e4f..16e6aa7e8e92 100644 --- a/lib/std/base64.zig +++ b/lib/std/base64.zig @@ -108,12 +108,12 @@ pub const Base64Encoder = struct { acc_len += 8; while (acc_len >= 6) { acc_len -= 6; - dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc >> acc_len))]; + dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc >> acc_len)))]; out_idx += 1; } } if (acc_len > 0) { - dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc << 6 - acc_len))]; + dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc << 6 - acc_len)))]; out_idx += 1; } if (encoder.pad_char) |pad_char| { @@ -144,7 +144,7 @@ pub const Base64Decoder = struct { assert(!char_in_alphabet[c]); assert(pad_char == null or c != pad_char.?); - result.char_to_index[c] = 
@intCast(u8, i); + result.char_to_index[c] = @as(u8, @intCast(i)); char_in_alphabet[c] = true; } return result; @@ -196,7 +196,7 @@ pub const Base64Decoder = struct { acc_len += 6; if (acc_len >= 8) { acc_len -= 8; - dest[dest_idx] = @truncate(u8, acc >> acc_len); + dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } @@ -271,7 +271,7 @@ pub const Base64DecoderWithIgnore = struct { if (acc_len >= 8) { if (dest_idx == dest.len) return error.NoSpaceLeft; acc_len -= 8; - dest[dest_idx] = @truncate(u8, acc >> acc_len); + dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig index 4b83e8e057af..9e5c707b84d0 100644 --- a/lib/std/bit_set.zig +++ b/lib/std/bit_set.zig @@ -119,19 +119,19 @@ pub fn IntegerBitSet(comptime size: u16) type { if (range.start == range.end) return; if (MaskInt == u0) return; - const start_bit = @intCast(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @intCast(range.start)); var mask = std.math.boolMask(MaskInt, true) << start_bit; if (range.end != bit_length) { - const end_bit = @intCast(ShiftInt, range.end); - mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)); + const end_bit = @as(ShiftInt, @intCast(range.end)); + mask &= std.math.boolMask(MaskInt, true) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit))); } self.mask &= ~mask; mask = std.math.boolMask(MaskInt, value) << start_bit; if (range.end != bit_length) { - const end_bit = @intCast(ShiftInt, range.end); - mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)); + const end_bit = @as(ShiftInt, @intCast(range.end)); + mask &= std.math.boolMask(MaskInt, value) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit))); } self.mask |= mask; } @@ -292,7 +292,7 @@ pub fn IntegerBitSet(comptime size: u16) type 
{ .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; - self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; + self.bits_remain &= (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; return top_bit; }, } @@ -302,11 +302,11 @@ pub fn IntegerBitSet(comptime size: u16) type { fn maskBit(index: usize) MaskInt { if (MaskInt == u0) return 0; - return @as(MaskInt, 1) << @intCast(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @intCast(index)); } fn boolMaskBit(index: usize, value: bool) MaskInt { if (MaskInt == u0) return 0; - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } @@ -442,10 +442,10 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { if (num_masks == 0) return; const start_mask_index = maskIndex(range.start); - const start_bit = @truncate(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @truncate(range.start)); const end_mask_index = maskIndex(range.end); - const end_bit = @truncate(ShiftInt, range.end); + const end_bit = @as(ShiftInt, @truncate(range.end)); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; @@ -634,13 +634,13 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { } fn maskBit(index: usize) MaskInt { - return @as(MaskInt, 1) << @truncate(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } @@ -731,7 +731,7 @@ pub const DynamicBitSetUnmanaged = struct { // set the padding bits in the old last item to 1 if (fill and 
old_masks > 0) { const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len; - const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits); + const old_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(old_padding_bits)); self.masks[old_masks - 1] |= ~old_mask; } @@ -745,7 +745,7 @@ pub const DynamicBitSetUnmanaged = struct { // Zero out the padding bits if (new_len > 0) { const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[new_masks - 1] &= last_item_mask; } @@ -816,10 +816,10 @@ pub const DynamicBitSetUnmanaged = struct { if (range.start == range.end) return; const start_mask_index = maskIndex(range.start); - const start_bit = @truncate(ShiftInt, range.start); + const start_bit = @as(ShiftInt, @truncate(range.start)); const end_mask_index = maskIndex(range.end); - const end_bit = @truncate(ShiftInt, range.end); + const end_bit = @as(ShiftInt, @truncate(range.end)); if (start_mask_index == end_mask_index) { var mask1 = std.math.boolMask(MaskInt, true) << start_bit; @@ -887,7 +887,7 @@ pub const DynamicBitSetUnmanaged = struct { } const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[num_masks - 1] &= last_item_mask; } @@ -996,7 +996,7 @@ pub const DynamicBitSetUnmanaged = struct { pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { const num_masks = numMasks(self.bit_length); const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length; - const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits); + const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); return 
Iterator(options).init(self.masks[0..num_masks], last_item_mask); } @@ -1005,13 +1005,13 @@ pub const DynamicBitSetUnmanaged = struct { } fn maskBit(index: usize) MaskInt { - return @as(MaskInt, 1) << @truncate(ShiftInt, index); + return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { - return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index); + return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } fn numMasks(bit_length: usize) usize { return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); @@ -1255,7 +1255,7 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; - const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1; + const no_top_bit_mask = (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; self.bits_remain &= no_top_bit_mask; return top_bit + self.bit_offset; }, diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig index 0e0b601af68f..6986414a24e9 100644 --- a/lib/std/bounded_array.zig +++ b/lib/std/bounded_array.zig @@ -394,7 +394,7 @@ test "BoundedArrayAligned" { try a.append(255); try a.append(255); - const b = @ptrCast(*const [2]u16, a.constSlice().ptr); + const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr)); try testing.expectEqual(@as(u16, 0), b[0]); try testing.expectEqual(@as(u16, 65535), b[1]); } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 54781e44656a..99761b146ddc 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -784,7 +784,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr exit_size.* = 256; - return @ptrCast([*:0]u16, utf16.ptr); + return @as([*:0]u16, @ptrCast(utf16.ptr)); } }; diff --git a/lib/std/c.zig 
b/lib/std/c.zig index 3b4bfef826f0..149f3ab7e199 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -113,7 +113,7 @@ pub usingnamespace switch (builtin.os.tag) { pub fn getErrno(rc: anytype) c.E { if (rc == -1) { - return @enumFromInt(c.E, c._errno().*); + return @as(c.E, @enumFromInt(c._errno().*)); } else { return .SUCCESS; } diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index 6dd517eada67..0f60c2f841d4 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -1177,10 +1177,10 @@ pub const sigset_t = u32; pub const empty_sigset: sigset_t = 0; pub const SIG = struct { - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 5); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(5)); /// block specified signal set pub const _BLOCK = 1; @@ -1411,7 +1411,7 @@ pub const MAP = struct { pub const NOCACHE = 0x0400; /// don't reserve needed swap area pub const NORESERVE = 0x0040; - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); }; pub const MSF = struct { @@ -1879,7 +1879,7 @@ pub const W = struct { pub const UNTRACED = 0x00000002; pub fn EXITSTATUS(x: u32) u8 { - return @intCast(u8, x >> 8); + return @as(u8, @intCast(x >> 8)); } pub fn TERMSIG(x: u32) u32 { return status(x); @@ -2463,7 +2463,7 @@ pub const KernE = enum(u32) { pub const mach_msg_return_t = kern_return_t; pub fn getMachMsgError(err: mach_msg_return_t) MachMsgE { - return @enumFromInt(MachMsgE, @truncate(u32, @intCast(usize, err))); + return @as(MachMsgE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); } /// All special 
error code bits defined below. @@ -2665,10 +2665,10 @@ pub const RTLD = struct { pub const NODELETE = 0x80; pub const FIRST = 0x100; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); - pub const MAIN_ONLY = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -5))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const MAIN_ONLY = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -5))))); }; pub const F = struct { @@ -3238,14 +3238,14 @@ pub const PosixSpawn = struct { pub fn get(self: Attr) Error!u16 { var flags: c_short = undefined; switch (errno(posix_spawnattr_getflags(&self.attr, &flags))) { - .SUCCESS => return @bitCast(u16, flags), + .SUCCESS => return @as(u16, @bitCast(flags)), .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn set(self: *Attr, flags: u16) Error!void { - switch (errno(posix_spawnattr_setflags(&self.attr, @bitCast(c_short, flags)))) { + switch (errno(posix_spawnattr_setflags(&self.attr, @as(c_short, @bitCast(flags))))) { .SUCCESS => return, .INVAL => unreachable, else => |err| return unexpectedErrno(err), @@ -3281,7 +3281,7 @@ pub const PosixSpawn = struct { } pub fn openZ(self: *Actions, fd: fd_t, path: [*:0]const u8, flags: u32, mode: mode_t) Error!void { - switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @bitCast(c_int, flags), mode))) { + switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @as(c_int, @bitCast(flags)), mode))) { .SUCCESS => return, .BADF => return error.InvalidFileDescriptor, .NOMEM => return error.SystemResources, @@ -3402,11 +3402,11 @@ pub const 
PosixSpawn = struct { pub fn waitpid(pid: pid_t, flags: u32) Error!std.os.WaitPidResult { var status: c_int = undefined; while (true) { - const rc = waitpid(pid, &status, @intCast(c_int, flags)); + const rc = waitpid(pid, &status, @as(c_int, @intCast(flags))); switch (errno(rc)) { .SUCCESS => return std.os.WaitPidResult{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, @intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => return error.ChildExecFailed, @@ -3418,7 +3418,7 @@ pub const PosixSpawn = struct { }; pub fn getKernError(err: kern_return_t) KernE { - return @enumFromInt(KernE, @truncate(u32, @intCast(usize, err))); + return @as(KernE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); } pub fn unexpectedKernError(err: KernE) std.os.UnexpectedError { @@ -3585,9 +3585,9 @@ pub const MachTask = extern struct { .top => VM_REGION_TOP_INFO, }, switch (tag) { - .basic => @ptrCast(vm_region_info_t, &info.info.basic), - .extended => @ptrCast(vm_region_info_t, &info.info.extended), - .top => @ptrCast(vm_region_info_t, &info.info.top), + .basic => @as(vm_region_info_t, @ptrCast(&info.info.basic)), + .extended => @as(vm_region_info_t, @ptrCast(&info.info.extended)), + .top => @as(vm_region_info_t, @ptrCast(&info.info.top)), }, &count, &objname, @@ -3640,8 +3640,8 @@ pub const MachTask = extern struct { &base_len, &nesting, switch (tag) { - .short => @ptrCast(vm_region_recurse_info_t, &info.info.short), - .full => @ptrCast(vm_region_recurse_info_t, &info.info.full), + .short => @as(vm_region_recurse_info_t, @ptrCast(&info.info.short)), + .full => @as(vm_region_recurse_info_t, @ptrCast(&info.info.full)), }, &count, ))) { @@ -3701,7 +3701,7 @@ pub const MachTask = extern struct { task.port, curr_addr, @intFromPtr(out_buf.ptr), - @intCast(mach_msg_type_number_t, curr_size), + @as(mach_msg_type_number_t, @intCast(curr_size)), ))) { .SUCCESS => {}, .FAILURE => return error.PermissionDenied, @@ 
-3752,7 +3752,7 @@ pub const MachTask = extern struct { else => |err| return unexpectedKernError(err), } - @memcpy(out_buf[0..curr_bytes_read], @ptrFromInt([*]const u8, vm_memory)); + @memcpy(out_buf[0..curr_bytes_read], @as([*]const u8, @ptrFromInt(vm_memory))); _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read); out_buf = out_buf[curr_bytes_read..]; @@ -3782,10 +3782,10 @@ pub const MachTask = extern struct { switch (getKernError(task_info( task.port, TASK_VM_INFO, - @ptrCast(task_info_t, &vm_info), + @as(task_info_t, @ptrCast(&vm_info)), &info_count, ))) { - .SUCCESS => return @intCast(usize, vm_info.page_size), + .SUCCESS => return @as(usize, @intCast(vm_info.page_size)), else => {}, } } @@ -3802,7 +3802,7 @@ pub const MachTask = extern struct { switch (getKernError(task_info( task.port, MACH_TASK_BASIC_INFO, - @ptrCast(task_info_t, &info), + @as(task_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3832,7 +3832,7 @@ pub const MachTask = extern struct { _ = vm_deallocate( self_task.port, @intFromPtr(list.buf.ptr), - @intCast(vm_size_t, list.buf.len * @sizeOf(mach_port_t)), + @as(vm_size_t, @intCast(list.buf.len * @sizeOf(mach_port_t))), ); } }; @@ -3841,7 +3841,7 @@ pub const MachTask = extern struct { var thread_list: mach_port_array_t = undefined; var thread_count: mach_msg_type_number_t = undefined; switch (getKernError(task_threads(task.port, &thread_list, &thread_count))) { - .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] }, + .SUCCESS => return ThreadList{ .buf = @as([*]MachThread, @ptrCast(thread_list))[0..thread_count] }, else => |err| return unexpectedKernError(err), } } @@ -3860,7 +3860,7 @@ pub const MachThread = extern struct { switch (getKernError(thread_info( thread.port, THREAD_BASIC_INFO, - @ptrCast(thread_info_t, &info), + @as(thread_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3874,7 +3874,7 @@ pub const MachThread = extern struct { switch 
(getKernError(thread_info( thread.port, THREAD_IDENTIFIER_INFO, - @ptrCast(thread_info_t, &info), + @as(thread_info_t, @ptrCast(&info)), &count, ))) { .SUCCESS => return info, @@ -3962,7 +3962,7 @@ pub const thread_affinity_policy_t = [*]thread_affinity_policy; pub const THREAD_AFFINITY = struct { pub const POLICY = 0; - pub const POLICY_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t)); + pub const POLICY_COUNT = @as(mach_msg_type_number_t, @intCast(@sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t))); }; /// cpu affinity api @@ -4041,7 +4041,7 @@ pub const host_preferred_user_arch_data_t = host_preferred_user_arch; pub const host_preferred_user_arch_t = *host_preferred_user_arch; fn HostCount(comptime HT: type) mach_msg_type_number_t { - return @intCast(mach_msg_type_number_t, @sizeOf(HT) / @sizeOf(integer_t)); + return @as(mach_msg_type_number_t, @intCast(@sizeOf(HT) / @sizeOf(integer_t))); } pub const HOST = struct { diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig index 912bb9905633..6782aa098a18 100644 --- a/lib/std/c/dragonfly.zig +++ b/lib/std/c/dragonfly.zig @@ -172,7 +172,7 @@ pub const PROT = struct { pub const MAP = struct { pub const FILE = 0; - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const ANONYMOUS = ANON; pub const COPY = PRIVATE; pub const SHARED = 1; @@ -208,7 +208,7 @@ pub const W = struct { pub const TRAPPED = 0x0020; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -220,7 +220,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; + return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 
0xff; @@ -620,9 +620,9 @@ pub const S = struct { pub const BADSIG = SIG.ERR; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); pub const BLOCK = 1; pub const UNBLOCK = 2; @@ -871,10 +871,10 @@ pub const RTLD = struct { pub const NODELETE = 0x01000; pub const NOLOAD = 0x02000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); - pub const ALL = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const ALL = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4))))); }; pub const dl_phdr_info = extern struct { diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig index 7a265ac2b313..deec41493d38 100644 --- a/lib/std/c/freebsd.zig +++ b/lib/std/c/freebsd.zig @@ -20,11 +20,11 @@ fn __BIT_COUNT(bits: []const c_long) c_long { fn __BIT_MASK(s: usize) c_long { var x = s % CPU_SETSIZE; - return @bitCast(c_long, @intCast(c_ulong, 1) << @intCast(u6, x)); + return @as(c_long, @bitCast(@as(c_ulong, @intCast(1)) << @as(u6, @intCast(x)))); } pub fn CPU_COUNT(set: cpuset_t) c_int { - return @intCast(c_int, __BIT_COUNT(set.__bits[0..])); + return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..]))); } pub fn CPU_ZERO(set: *cpuset_t) void { @@ -529,7 +529,7 @@ pub const 
cap_rights_t = extern struct { pub const CAP = struct { pub fn RIGHT(idx: u6, bit: u64) u64 { - return (@intCast(u64, 1) << (57 + idx)) | bit; + return (@as(u64, @intCast(1)) << (57 + idx)) | bit; } pub const READ = CAP.RIGHT(0, 0x0000000000000001); pub const WRITE = CAP.RIGHT(0, 0x0000000000000002); @@ -961,7 +961,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const FIXED = 0x0010; @@ -1013,7 +1013,7 @@ pub const W = struct { pub const TRAPPED = 32; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -1025,7 +1025,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; + return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 0xff; @@ -1086,9 +1086,9 @@ pub const SIG = struct { pub const UNBLOCK = 2; pub const SETMASK = 3; - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); pub const WORDS = 4; pub const MAXSIG = 128; @@ -2626,7 +2626,7 @@ pub const domainset_t = extern struct { }; pub fn DOMAINSET_COUNT(set: domainset_t) c_int { - return @intCast(c_int, __BIT_COUNT(set.__bits[0..])); + return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..]))); } pub const domainset = extern struct { @@ -2650,7 +2650,7 @@ const ioctl_cmd = enum(u32) { }; fn ioImpl(cmd: 
ioctl_cmd, op: u8, nr: u8, comptime IT: type) u32 { - return @bitCast(u32, @intFromEnum(cmd) | @intCast(u32, @truncate(u8, @sizeOf(IT))) << 16 | @intCast(u32, op) << 8 | nr); + return @as(u32, @bitCast(@intFromEnum(cmd) | @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IT))))) << 16 | @as(u32, @intCast(op)) << 8 | nr)); } pub fn IO(op: u8, nr: u8) u32 { diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig index 2f9917a0f3d8..c47ceeb00362 100644 --- a/lib/std/c/haiku.zig +++ b/lib/std/c/haiku.zig @@ -414,7 +414,7 @@ pub const CLOCK = struct { pub const MAP = struct { /// mmap() error return code - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); /// changes are seen by others pub const SHARED = 0x01; /// changes are only seen by caller @@ -443,7 +443,7 @@ pub const W = struct { pub const NOWAIT = 0x20; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, s & 0xff); + return @as(u8, @intCast(s & 0xff)); } pub fn TERMSIG(s: u32) u32 { @@ -481,9 +481,9 @@ pub const SA = struct { }; pub const SIG = struct { - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); pub const HUP = 1; pub const INT = 2; diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig index d3a3bfdeba8a..ddc488e11521 100644 --- a/lib/std/c/linux.zig +++ b/lib/std/c/linux.zig @@ -32,7 +32,7 @@ pub const MADV = linux.MADV; pub const MAP = struct { pub usingnamespace linux.MAP; /// Only used by libc to communicate failure. 
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); }; pub const MSF = linux.MSF; pub const MMAP2_UNIT = linux.MMAP2_UNIT; diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig index 2c7c236ed09e..1fc078428737 100644 --- a/lib/std/c/netbsd.zig +++ b/lib/std/c/netbsd.zig @@ -172,9 +172,9 @@ pub const RTLD = struct { pub const NODELETE = 0x01000; pub const NOLOAD = 0x02000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); }; pub const dl_phdr_info = extern struct { @@ -597,7 +597,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const REMAPDUP = 0x0004; @@ -653,7 +653,7 @@ pub const W = struct { pub const TRAPPED = 0x00000040; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -1106,9 +1106,9 @@ pub const winsize = extern struct { const NSIG = 32; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, 
@ptrFromInt(maxInt(usize))); pub const WORDS = 4; pub const MAXSIG = 128; diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig index 47c1aec862e6..06085903e4e3 100644 --- a/lib/std/c/openbsd.zig +++ b/lib/std/c/openbsd.zig @@ -449,7 +449,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const FIXED = 0x0010; @@ -488,7 +488,7 @@ pub const W = struct { pub const CONTINUED = 8; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return (s & 0x7f); @@ -1000,11 +1000,11 @@ pub const winsize = extern struct { const NSIG = 33; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const CATCH = @ptrFromInt(?Sigaction.handler_fn, 2); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 3); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const CATCH = @as(?Sigaction.handler_fn, @ptrFromInt(2)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(3)); pub const HUP = 1; pub const INT = 2; diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig index 511bf9ccc54b..cbca1805bb6e 100644 --- a/lib/std/c/solaris.zig +++ b/lib/std/c/solaris.zig @@ -111,10 +111,10 @@ pub const RTLD = struct { pub const FIRST = 0x02000; pub const CONFGEN = 0x10000; - pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))); - pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2))); - pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, 
-3))); - pub const PROBE = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4))); + pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))); + pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2))))); + pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3))))); + pub const PROBE = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4))))); }; pub const Flock = extern struct { @@ -524,7 +524,7 @@ pub const CLOCK = struct { }; pub const MAP = struct { - pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize)); + pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize))); pub const SHARED = 0x0001; pub const PRIVATE = 0x0002; pub const TYPE = 0x000f; @@ -583,7 +583,7 @@ pub const W = struct { pub const NOWAIT = 0o200; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s >> 8) & 0xff); + return @as(u8, @intCast((s >> 8) & 0xff)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -886,10 +886,10 @@ pub const winsize = extern struct { const NSIG = 75; pub const SIG = struct { - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); - pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 2); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); + pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(2)); pub const WORDS = 4; pub const MAXSIG = 75; @@ -1441,7 +1441,7 @@ pub const AT = struct { /// Magic value that specify the use of the current working directory /// to determine the target of relative file paths in the openat() and /// similar syscalls. 
- pub const FDCWD = @bitCast(fd_t, @as(u32, 0xffd19553)); + pub const FDCWD = @as(fd_t, @bitCast(@as(u32, 0xffd19553))); /// Do not follow symbolic links pub const SYMLINK_NOFOLLOW = 0x1000; @@ -1907,9 +1907,9 @@ const IoCtlCommand = enum(u32) { }; fn ioImpl(cmd: IoCtlCommand, io_type: u8, nr: u8, comptime IOT: type) i32 { - const size = @intCast(u32, @truncate(u8, @sizeOf(IOT))) << 16; - const t = @intCast(u32, io_type) << 8; - return @bitCast(i32, @intFromEnum(cmd) | size | t | nr); + const size = @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IOT))))) << 16; + const t = @as(u32, @intCast(io_type)) << 8; + return @as(i32, @bitCast(@intFromEnum(cmd) | size | t | nr)); } pub fn IO(io_type: u8, nr: u8) i32 { diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 636ef7f4d726..9f4d75084fd8 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -93,7 +93,7 @@ pub const ChildProcess = struct { switch (builtin.os.tag) { .linux => { if (rus.rusage) |ru| { - return @intCast(usize, ru.maxrss) * 1024; + return @as(usize, @intCast(ru.maxrss)) * 1024; } else { return null; } @@ -108,7 +108,7 @@ pub const ChildProcess = struct { .macos, .ios => { if (rus.rusage) |ru| { // Darwin oddly reports in bytes instead of kilobytes. 
- return @intCast(usize, ru.maxrss); + return @as(usize, @intCast(ru.maxrss)); } else { return null; } @@ -376,7 +376,7 @@ pub const ChildProcess = struct { if (windows.kernel32.GetExitCodeProcess(self.id, &exit_code) == 0) { break :x Term{ .Unknown = 0 }; } else { - break :x Term{ .Exited = @truncate(u8, exit_code) }; + break :x Term{ .Exited = @as(u8, @truncate(exit_code)) }; } }); @@ -449,7 +449,7 @@ pub const ChildProcess = struct { // has a value greater than 0 if ((fd[0].revents & std.os.POLL.IN) != 0) { const err_int = try readIntFd(err_pipe[0]); - return @errSetCast(SpawnError, @errorFromInt(err_int)); + return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } else { // Write maxInt(ErrInt) to the write end of the err_pipe. This is after @@ -462,7 +462,7 @@ pub const ChildProcess = struct { // Here we potentially return the fork child's error from the parent // pid. if (err_int != maxInt(ErrInt)) { - return @errSetCast(SpawnError, @errorFromInt(err_int)); + return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } } @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); + break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. 
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -605,7 +605,7 @@ pub const ChildProcess = struct { } // we are the parent - const pid = @intCast(i32, pid_result); + const pid = @as(i32, @intCast(pid_result)); if (self.stdin_behavior == StdIo.Pipe) { self.stdin = File{ .handle = stdin_pipe[1] }; } else { @@ -1015,11 +1015,11 @@ fn windowsCreateProcessPathExt( else => return windows.unexpectedStatus(rc), } - const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf); + const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf)); if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) { break :found_name null; } - break :found_name @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2]; + break :found_name @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2]; }; const unappended_err = unappended: { @@ -1104,7 +1104,7 @@ fn windowsCreateProcessPathExt( else => return windows.unexpectedStatus(rc), } - const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf); + const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf)); // Skip directories if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue; @@ -1164,7 +1164,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1 null, windows.TRUE, windows.CREATE_UNICODE_ENVIRONMENT, - @ptrCast(?*anyopaque, envp_ptr), + @as(?*anyopaque, @ptrCast(envp_ptr)), cwd_ptr, lpStartupInfo, lpProcessInformation, @@ -1376,7 +1376,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void { .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; - file.writer().writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources; + file.writer().writeIntNative(u64, @as(u64, @intCast(value))) catch return 
error.SystemResources; } fn readIntFd(fd: i32) !ErrInt { @@ -1385,7 +1385,7 @@ fn readIntFd(fd: i32) !ErrInt { .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; - return @intCast(ErrInt, file.reader().readIntNative(u64) catch return error.SystemResources); + return @as(ErrInt, @intCast(file.reader().readIntNative(u64) catch return error.SystemResources)); } /// Caller must free result. diff --git a/lib/std/coff.zig b/lib/std/coff.zig index d28e54b94cee..a08c2c514d18 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -457,12 +457,12 @@ pub const ImportLookupEntry32 = struct { pub fn getImportByName(raw: u32) ?ByName { if (mask & raw != 0) return null; - return @bitCast(ByName, raw); + return @as(ByName, @bitCast(raw)); } pub fn getImportByOrdinal(raw: u32) ?ByOrdinal { if (mask & raw == 0) return null; - return @bitCast(ByOrdinal, raw); + return @as(ByOrdinal, @bitCast(raw)); } }; @@ -483,12 +483,12 @@ pub const ImportLookupEntry64 = struct { pub fn getImportByName(raw: u64) ?ByName { if (mask & raw != 0) return null; - return @bitCast(ByName, raw); + return @as(ByName, @bitCast(raw)); } pub fn getImportByOrdinal(raw: u64) ?ByOrdinal { if (mask & raw == 0) return null; - return @bitCast(ByOrdinal, raw); + return @as(ByOrdinal, @bitCast(raw)); } }; @@ -1146,25 +1146,25 @@ pub const Coff = struct { } pub fn getCoffHeader(self: Coff) CoffHeader { - return @ptrCast(*align(1) const CoffHeader, self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)]).*; + return @as(*align(1) const CoffHeader, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)])).*; } pub fn getOptionalHeader(self: Coff) OptionalHeader { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeader, self.data[offset..][0..@sizeOf(OptionalHeader)]).*; + return @as(*align(1) const OptionalHeader, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader)])).*; } pub fn getOptionalHeader32(self: 
Coff) OptionalHeaderPE32 { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeaderPE32, self.data[offset..][0..@sizeOf(OptionalHeaderPE32)]).*; + return @as(*align(1) const OptionalHeaderPE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE32)])).*; } pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 { assert(self.is_image); const offset = self.coff_header_offset + @sizeOf(CoffHeader); - return @ptrCast(*align(1) const OptionalHeaderPE64, self.data[offset..][0..@sizeOf(OptionalHeaderPE64)]).*; + return @as(*align(1) const OptionalHeaderPE64, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE64)])).*; } pub fn getImageBase(self: Coff) u64 { @@ -1193,7 +1193,7 @@ pub const Coff = struct { else => unreachable, // We assume we have validated the header already }; const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size; - return @ptrCast([*]align(1) const ImageDataDirectory, self.data[offset..])[0..self.getNumberOfDataDirectories()]; + return @as([*]align(1) const ImageDataDirectory, @ptrCast(self.data[offset..]))[0..self.getNumberOfDataDirectories()]; } pub fn getSymtab(self: *const Coff) ?Symtab { @@ -1217,7 +1217,7 @@ pub const Coff = struct { pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader { const coff_header = self.getCoffHeader(); const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header; - return @ptrCast([*]align(1) const SectionHeader, self.data.ptr + offset)[0..coff_header.number_of_sections]; + return @as([*]align(1) const SectionHeader, @ptrCast(self.data.ptr + offset))[0..coff_header.number_of_sections]; } pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader { @@ -1303,9 +1303,9 @@ pub const Symtab = struct { return .{ .name = raw[0..8].*, .value = mem.readIntLittle(u32, raw[8..12]), - .section_number = @enumFromInt(SectionNumber, 
mem.readIntLittle(u16, raw[12..14])), - .type = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])), - .storage_class = @enumFromInt(StorageClass, raw[16]), + .section_number = @as(SectionNumber, @enumFromInt(mem.readIntLittle(u16, raw[12..14]))), + .type = @as(SymType, @bitCast(mem.readIntLittle(u16, raw[14..16]))), + .storage_class = @as(StorageClass, @enumFromInt(raw[16])), .number_of_aux_symbols = raw[17], }; } @@ -1333,7 +1333,7 @@ pub const Symtab = struct { fn asWeakExtDef(raw: []const u8) WeakExternalDefinition { return .{ .tag_index = mem.readIntLittle(u32, raw[0..4]), - .flag = @enumFromInt(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])), + .flag = @as(WeakExternalFlag, @enumFromInt(mem.readIntLittle(u32, raw[4..8]))), .unused = raw[8..18].*, }; } @@ -1351,7 +1351,7 @@ pub const Symtab = struct { .number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]), .checksum = mem.readIntLittle(u32, raw[8..12]), .number = mem.readIntLittle(u16, raw[12..14]), - .selection = @enumFromInt(ComdatSelection, raw[14]), + .selection = @as(ComdatSelection, @enumFromInt(raw[14])), .unused = raw[15..18].*, }; } @@ -1384,6 +1384,6 @@ pub const Strtab = struct { pub fn get(self: Strtab, off: u32) []const u8 { assert(off < self.buffer.len); - return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.ptr + off)), 0); } }; diff --git a/lib/std/compress/deflate/bits_utils.zig b/lib/std/compress/deflate/bits_utils.zig index 85bae95bc843..4b440dc44e56 100644 --- a/lib/std/compress/deflate/bits_utils.zig +++ b/lib/std/compress/deflate/bits_utils.zig @@ -3,7 +3,7 @@ const math = @import("std").math; // Reverse bit-by-bit a N-bit code. 
pub fn bitReverse(comptime T: type, value: T, N: usize) T { const r = @bitReverse(value); - return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N); + return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).Int.bits - N)); } test "bitReverse" { diff --git a/lib/std/compress/deflate/compressor.zig b/lib/std/compress/deflate/compressor.zig index e2cbafe520d2..72de63f162db 100644 --- a/lib/std/compress/deflate/compressor.zig +++ b/lib/std/compress/deflate/compressor.zig @@ -160,7 +160,7 @@ fn matchLen(a: []u8, b: []u8, max: u32) u32 { var bounded_b = b[0..max]; for (bounded_a, 0..) |av, i| { if (bounded_b[i] != av) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } return max; @@ -313,14 +313,14 @@ pub fn Compressor(comptime WriterType: anytype) type { // the entire table onto the stack (https://golang.org/issue/18625). for (self.hash_prev, 0..) |v, i| { if (v > delta) { - self.hash_prev[i] = @intCast(u32, v - delta); + self.hash_prev[i] = @as(u32, @intCast(v - delta)); } else { self.hash_prev[i] = 0; } } for (self.hash_head, 0..) |v, i| { if (v > delta) { - self.hash_head[i] = @intCast(u32, v - delta); + self.hash_head[i] = @as(u32, @intCast(v - delta)); } else { self.hash_head[i] = 0; } @@ -329,7 +329,7 @@ pub fn Compressor(comptime WriterType: anytype) type { } const n = std.compress.deflate.copy(self.window[self.window_end..], b); self.window_end += n; - return @intCast(u32, n); + return @as(u32, @intCast(n)); } fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void { @@ -398,13 +398,13 @@ pub fn Compressor(comptime WriterType: anytype) type { // Our chain should point to the previous value. self.hash_prev[di & window_mask] = hh.*; // Set the head of the hash chain to us. - hh.* = @intCast(u32, di + self.hash_offset); + hh.* = @as(u32, @intCast(di + self.hash_offset)); } self.hash = new_h; } // Update window information. 
self.window_end = n; - self.index = @intCast(u32, n); + self.index = @as(u32, @intCast(n)); } const Match = struct { @@ -471,11 +471,11 @@ pub fn Compressor(comptime WriterType: anytype) type { break; } - if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) { + if (@as(u32, @intCast(self.hash_prev[i & window_mask])) < self.hash_offset) { break; } - i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset; + i = @as(u32, @intCast(self.hash_prev[i & window_mask])) - self.hash_offset; if (i < min_index) { break; } @@ -576,7 +576,7 @@ pub fn Compressor(comptime WriterType: anytype) type { // Flush current output block if any. if (self.byte_available) { // There is still one pending token that needs to be flushed - self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1])); + self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[self.index - 1]))); self.tokens_count += 1; self.byte_available = false; } @@ -591,9 +591,9 @@ pub fn Compressor(comptime WriterType: anytype) type { // Update the hash self.hash = hash4(self.window[self.index .. 
self.index + min_match_length]); var hh = &self.hash_head[self.hash & hash_mask]; - self.chain_head = @intCast(u32, hh.*); - self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head); - hh.* = @intCast(u32, self.index + self.hash_offset); + self.chain_head = @as(u32, @intCast(hh.*)); + self.hash_prev[self.index & window_mask] = @as(u32, @intCast(self.chain_head)); + hh.* = @as(u32, @intCast(self.index + self.hash_offset)); } var prev_length = self.length; var prev_offset = self.offset; @@ -614,7 +614,7 @@ pub fn Compressor(comptime WriterType: anytype) type { self.index, self.chain_head -| self.hash_offset, min_match_length - 1, - @intCast(u32, lookahead), + @as(u32, @intCast(lookahead)), ); if (fmatch.ok) { self.length = fmatch.length; @@ -631,12 +631,12 @@ pub fn Compressor(comptime WriterType: anytype) type { // There was a match at the previous step, and the current match is // not better. Output the previous match. if (self.compression_level.fast_skip_hashshing != skip_never) { - self.tokens[self.tokens_count] = token.matchToken(@intCast(u32, self.length - base_match_length), @intCast(u32, self.offset - base_match_offset)); + self.tokens[self.tokens_count] = token.matchToken(@as(u32, @intCast(self.length - base_match_length)), @as(u32, @intCast(self.offset - base_match_offset))); self.tokens_count += 1; } else { self.tokens[self.tokens_count] = token.matchToken( - @intCast(u32, prev_length - base_match_length), - @intCast(u32, prev_offset -| base_match_offset), + @as(u32, @intCast(prev_length - base_match_length)), + @as(u32, @intCast(prev_offset -| base_match_offset)), ); self.tokens_count += 1; } @@ -661,7 +661,7 @@ pub fn Compressor(comptime WriterType: anytype) type { var hh = &self.hash_head[self.hash & hash_mask]; self.hash_prev[index & window_mask] = hh.*; // Set the head of the hash chain to us. 
- hh.* = @intCast(u32, index + self.hash_offset); + hh.* = @as(u32, @intCast(index + self.hash_offset)); } } self.index = index; @@ -689,7 +689,7 @@ pub fn Compressor(comptime WriterType: anytype) type { if (self.compression_level.fast_skip_hashshing != skip_never) { i = self.index; } - self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i])); + self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[i]))); self.tokens_count += 1; if (self.tokens_count == max_flate_block_tokens) { try self.writeBlock(self.tokens[0..self.tokens_count], i + 1); @@ -707,7 +707,7 @@ pub fn Compressor(comptime WriterType: anytype) type { fn fillStore(self: *Self, b: []const u8) u32 { const n = std.compress.deflate.copy(self.window[self.window_end..], b); self.window_end += n; - return @intCast(u32, n); + return @as(u32, @intCast(n)); } fn store(self: *Self) !void { diff --git a/lib/std/compress/deflate/compressor_test.zig b/lib/std/compress/deflate/compressor_test.zig index 858da8d8b5ff..5012bb3c074c 100644 --- a/lib/std/compress/deflate/compressor_test.zig +++ b/lib/std/compress/deflate/compressor_test.zig @@ -172,7 +172,7 @@ test "deflate/inflate" { defer testing.allocator.free(large_data_chunk); // fill with random data for (large_data_chunk, 0..) |_, i| { - large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i); + large_data_chunk[i] = @as(u8, @truncate(i)) *% @as(u8, @truncate(i)); } try testToFromWithLimit(large_data_chunk, limits); } diff --git a/lib/std/compress/deflate/decompressor.zig b/lib/std/compress/deflate/decompressor.zig index 40bde673263f..3f6ee151ba54 100644 --- a/lib/std/compress/deflate/decompressor.zig +++ b/lib/std/compress/deflate/decompressor.zig @@ -130,30 +130,30 @@ const HuffmanDecoder = struct { // Exception: To be compatible with zlib, we also need to // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. 
- if (code != @as(u32, 1) << @intCast(u5, max) and !(code == 1 and max == 1)) { + if (code != @as(u32, 1) << @as(u5, @intCast(max)) and !(code == 1 and max == 1)) { return false; } self.min = min; if (max > huffman_chunk_bits) { - var num_links = @as(u32, 1) << @intCast(u5, max - huffman_chunk_bits); - self.link_mask = @intCast(u32, num_links - 1); + var num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits)); + self.link_mask = @as(u32, @intCast(num_links - 1)); // create link tables var link = next_code[huffman_chunk_bits + 1] >> 1; self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link); self.sub_chunks = ArrayList(u32).init(self.allocator); self.initialized = true; - var j = @intCast(u32, link); + var j = @as(u32, @intCast(link)); while (j < huffman_num_chunks) : (j += 1) { - var reverse = @intCast(u32, bu.bitReverse(u16, @intCast(u16, j), 16)); - reverse >>= @intCast(u32, 16 - huffman_chunk_bits); - var off = j - @intCast(u32, link); + var reverse = @as(u32, @intCast(bu.bitReverse(u16, @as(u16, @intCast(j)), 16))); + reverse >>= @as(u32, @intCast(16 - huffman_chunk_bits)); + var off = j - @as(u32, @intCast(link)); if (sanity) { // check we are not overwriting an existing chunk assert(self.chunks[reverse] == 0); } - self.chunks[reverse] = @intCast(u16, off << huffman_value_shift | (huffman_chunk_bits + 1)); + self.chunks[reverse] = @as(u16, @intCast(off << huffman_value_shift | (huffman_chunk_bits + 1))); self.links[off] = try self.allocator.alloc(u16, num_links); if (sanity) { // initialize to a known invalid chunk code (0) to see if we overwrite @@ -170,12 +170,12 @@ const HuffmanDecoder = struct { } var ncode = next_code[n]; next_code[n] += 1; - var chunk = @intCast(u16, (li << huffman_value_shift) | n); - var reverse = @intCast(u16, bu.bitReverse(u16, @intCast(u16, ncode), 16)); - reverse >>= @intCast(u4, 16 - n); + var chunk = @as(u16, @intCast((li << huffman_value_shift) | n)); + var reverse = @as(u16, 
@intCast(bu.bitReverse(u16, @as(u16, @intCast(ncode)), 16))); + reverse >>= @as(u4, @intCast(16 - n)); if (n <= huffman_chunk_bits) { var off = reverse; - while (off < self.chunks.len) : (off += @as(u16, 1) << @intCast(u4, n)) { + while (off < self.chunks.len) : (off += @as(u16, 1) << @as(u4, @intCast(n))) { // We should never need to overwrite // an existing chunk. Also, 0 is // never a valid chunk, because the @@ -198,12 +198,12 @@ const HuffmanDecoder = struct { var link_tab = self.links[value]; reverse >>= huffman_chunk_bits; var off = reverse; - while (off < link_tab.len) : (off += @as(u16, 1) << @intCast(u4, n - huffman_chunk_bits)) { + while (off < link_tab.len) : (off += @as(u16, 1) << @as(u4, @intCast(n - huffman_chunk_bits))) { if (sanity) { // check we are not overwriting an existing chunk assert(link_tab[off] == 0); } - link_tab[off] = @intCast(u16, chunk); + link_tab[off] = @as(u16, @intCast(chunk)); } } } @@ -494,21 +494,21 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 5 + 5 + 4) { try self.moreBits(); } - var nlit = @intCast(u32, self.b & 0x1F) + 257; + var nlit = @as(u32, @intCast(self.b & 0x1F)) + 257; if (nlit > max_num_lit) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } self.b >>= 5; - var ndist = @intCast(u32, self.b & 0x1F) + 1; + var ndist = @as(u32, @intCast(self.b & 0x1F)) + 1; if (ndist > max_num_dist) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } self.b >>= 5; - var nclen = @intCast(u32, self.b & 0xF) + 4; + var nclen = @as(u32, @intCast(self.b & 0xF)) + 4; // num_codes is 19, so nclen is always valid. 
self.b >>= 4; self.nb -= 5 + 5 + 4; @@ -519,7 +519,7 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 3) { try self.moreBits(); } - self.codebits[code_order[i]] = @intCast(u32, self.b & 0x7); + self.codebits[code_order[i]] = @as(u32, @intCast(self.b & 0x7)); self.b >>= 3; self.nb -= 3; } @@ -575,8 +575,8 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < nb) { try self.moreBits(); } - rep += @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1); - self.b >>= @intCast(u5, nb); + rep += @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1)); + self.b >>= @as(u5, @intCast(nb)); self.nb -= nb; if (i + rep > n) { corrupt_input_error_offset = self.roffset; @@ -623,7 +623,7 @@ pub fn Decompressor(comptime ReaderType: type) type { var length: u32 = 0; switch (v) { 0...255 => { - self.dict.writeByte(@intCast(u8, v)); + self.dict.writeByte(@as(u8, @intCast(v))); if (self.dict.availWrite() == 0) { self.to_read = self.dict.readFlush(); self.step = huffmanBlock; @@ -676,8 +676,8 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < n) { try self.moreBits(); } - length += @intCast(u32, self.b) & ((@as(u32, 1) << @intCast(u5, n)) - 1); - self.b >>= @intCast(u5, n); + length += @as(u32, @intCast(self.b)) & ((@as(u32, 1) << @as(u5, @intCast(n))) - 1); + self.b >>= @as(u5, @intCast(n)); self.nb -= n; } @@ -686,9 +686,9 @@ pub fn Decompressor(comptime ReaderType: type) type { while (self.nb < 5) { try self.moreBits(); } - dist = @intCast( + dist = @as( u32, - bu.bitReverse(u8, @intCast(u8, (self.b & 0x1F) << 3), 8), + @intCast(bu.bitReverse(u8, @as(u8, @intCast((self.b & 0x1F) << 3)), 8)), ); self.b >>= 5; self.nb -= 5; @@ -699,16 +699,16 @@ pub fn Decompressor(comptime ReaderType: type) type { switch (dist) { 0...3 => dist += 1, 4...max_num_dist - 1 => { // 4...29 - var nb = @intCast(u32, dist - 2) >> 1; + var nb = @as(u32, @intCast(dist - 2)) >> 1; // have 1 bit in bottom of dist, need nb 
more. - var extra = (dist & 1) << @intCast(u5, nb); + var extra = (dist & 1) << @as(u5, @intCast(nb)); while (self.nb < nb) { try self.moreBits(); } - extra |= @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1); - self.b >>= @intCast(u5, nb); + extra |= @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1)); + self.b >>= @as(u5, @intCast(nb)); self.nb -= nb; - dist = (@as(u32, 1) << @intCast(u5, nb + 1)) + 1 + extra; + dist = (@as(u32, 1) << @as(u5, @intCast(nb + 1))) + 1 + extra; }, else => { corrupt_input_error_offset = self.roffset; @@ -762,10 +762,10 @@ pub fn Decompressor(comptime ReaderType: type) type { self.err = InflateError.UnexpectedEndOfStream; return InflateError.UnexpectedEndOfStream; }; - self.roffset += @intCast(u64, nr); - var n = @intCast(u32, self.buf[0]) | @intCast(u32, self.buf[1]) << 8; - var nn = @intCast(u32, self.buf[2]) | @intCast(u32, self.buf[3]) << 8; - if (@intCast(u16, nn) != @truncate(u16, ~n)) { + self.roffset += @as(u64, @intCast(nr)); + var n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8; + var nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8; + if (@as(u16, @intCast(nn)) != @as(u16, @truncate(~n))) { corrupt_input_error_offset = self.roffset; self.err = InflateError.CorruptInput; return InflateError.CorruptInput; @@ -793,9 +793,9 @@ pub fn Decompressor(comptime ReaderType: type) type { if (cnt < buf.len) { self.err = InflateError.UnexpectedEndOfStream; } - self.roffset += @intCast(u64, cnt); - self.copy_len -= @intCast(u32, cnt); - self.dict.writeMark(@intCast(u32, cnt)); + self.roffset += @as(u64, @intCast(cnt)); + self.copy_len -= @as(u32, @intCast(cnt)); + self.dict.writeMark(@as(u32, @intCast(cnt))); if (self.err != null) { return InflateError.UnexpectedEndOfStream; } @@ -826,7 +826,7 @@ pub fn Decompressor(comptime ReaderType: type) type { return InflateError.BadReaderState; }; self.roffset += 1; - self.b |= @as(u32, c) << @intCast(u5, 
self.nb); + self.b |= @as(u32, c) << @as(u5, @intCast(self.nb)); self.nb += 8; return; } @@ -854,14 +854,14 @@ pub fn Decompressor(comptime ReaderType: type) type { return InflateError.BadReaderState; }; self.roffset += 1; - b |= @intCast(u32, c) << @intCast(u5, nb & 31); + b |= @as(u32, @intCast(c)) << @as(u5, @intCast(nb & 31)); nb += 8; } var chunk = h.chunks[b & (huffman_num_chunks - 1)]; - n = @intCast(u32, chunk & huffman_count_mask); + n = @as(u32, @intCast(chunk & huffman_count_mask)); if (n > huffman_chunk_bits) { chunk = h.links[chunk >> huffman_value_shift][(b >> huffman_chunk_bits) & h.link_mask]; - n = @intCast(u32, chunk & huffman_count_mask); + n = @as(u32, @intCast(chunk & huffman_count_mask)); } if (n <= nb) { if (n == 0) { @@ -871,9 +871,9 @@ pub fn Decompressor(comptime ReaderType: type) type { self.err = InflateError.CorruptInput; return InflateError.CorruptInput; } - self.b = b >> @intCast(u5, n & 31); + self.b = b >> @as(u5, @intCast(n & 31)); self.nb = nb - n; - return @intCast(u32, chunk >> huffman_value_shift); + return @as(u32, @intCast(chunk >> huffman_value_shift)); } } } diff --git a/lib/std/compress/deflate/deflate_fast.zig b/lib/std/compress/deflate/deflate_fast.zig index c86d181cb59b..a11548fa1fa1 100644 --- a/lib/std/compress/deflate/deflate_fast.zig +++ b/lib/std/compress/deflate/deflate_fast.zig @@ -30,23 +30,23 @@ const table_size = 1 << table_bits; // Size of the table. const buffer_reset = math.maxInt(i32) - max_store_block_size * 2; fn load32(b: []u8, i: i32) u32 { - var s = b[@intCast(usize, i) .. @intCast(usize, i) + 4]; - return @intCast(u32, s[0]) | - @intCast(u32, s[1]) << 8 | - @intCast(u32, s[2]) << 16 | - @intCast(u32, s[3]) << 24; + var s = b[@as(usize, @intCast(i)) .. 
@as(usize, @intCast(i)) + 4]; + return @as(u32, @intCast(s[0])) | + @as(u32, @intCast(s[1])) << 8 | + @as(u32, @intCast(s[2])) << 16 | + @as(u32, @intCast(s[3])) << 24; } fn load64(b: []u8, i: i32) u64 { - var s = b[@intCast(usize, i)..@intCast(usize, i + 8)]; - return @intCast(u64, s[0]) | - @intCast(u64, s[1]) << 8 | - @intCast(u64, s[2]) << 16 | - @intCast(u64, s[3]) << 24 | - @intCast(u64, s[4]) << 32 | - @intCast(u64, s[5]) << 40 | - @intCast(u64, s[6]) << 48 | - @intCast(u64, s[7]) << 56; + var s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))]; + return @as(u64, @intCast(s[0])) | + @as(u64, @intCast(s[1])) << 8 | + @as(u64, @intCast(s[2])) << 16 | + @as(u64, @intCast(s[3])) << 24 | + @as(u64, @intCast(s[4])) << 32 | + @as(u64, @intCast(s[5])) << 40 | + @as(u64, @intCast(s[6])) << 48 | + @as(u64, @intCast(s[7])) << 56; } fn hash(u: u32) u32 { @@ -117,7 +117,7 @@ pub const DeflateFast = struct { // s_limit is when to stop looking for offset/length copies. The input_margin // lets us use a fast path for emitLiteral in the main loop, while we are // looking for copies. - var s_limit = @intCast(i32, src.len - input_margin); + var s_limit = @as(i32, @intCast(src.len - input_margin)); // next_emit is where in src the next emitLiteral should start from. var next_emit: i32 = 0; @@ -170,7 +170,7 @@ pub const DeflateFast = struct { // A 4-byte match has been found. We'll later see if more than 4 bytes // match. But, prior to the match, src[next_emit..s] are unmatched. Emit // them as literal bytes. - emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..@intCast(usize, s)]); + emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..@as(usize, @intCast(s))]); // Call emitCopy, and then see if another emitCopy could be our next // move. Repeat until we find no match for the input immediately after @@ -192,8 +192,8 @@ pub const DeflateFast = struct { // matchToken is flate's equivalent of Snappy's emitCopy. 
(length,offset) dst[tokens_count.*] = token.matchToken( - @intCast(u32, l + 4 - base_match_length), - @intCast(u32, s - t - base_match_offset), + @as(u32, @intCast(l + 4 - base_match_length)), + @as(u32, @intCast(s - t - base_match_offset)), ); tokens_count.* += 1; s += l; @@ -209,22 +209,22 @@ pub const DeflateFast = struct { // are faster as one load64 call (with some shifts) instead of // three load32 calls. var x = load64(src, s - 1); - var prev_hash = hash(@truncate(u32, x)); + var prev_hash = hash(@as(u32, @truncate(x))); self.table[prev_hash & table_mask] = TableEntry{ .offset = self.cur + s - 1, - .val = @truncate(u32, x), + .val = @as(u32, @truncate(x)), }; x >>= 8; - var curr_hash = hash(@truncate(u32, x)); + var curr_hash = hash(@as(u32, @truncate(x))); candidate = self.table[curr_hash & table_mask]; self.table[curr_hash & table_mask] = TableEntry{ .offset = self.cur + s, - .val = @truncate(u32, x), + .val = @as(u32, @truncate(x)), }; var offset = s - (candidate.offset - self.cur); - if (offset > max_match_offset or @truncate(u32, x) != candidate.val) { - cv = @truncate(u32, x >> 8); + if (offset > max_match_offset or @as(u32, @truncate(x)) != candidate.val) { + cv = @as(u32, @truncate(x >> 8)); next_hash = hash(cv); s += 1; break; @@ -232,18 +232,18 @@ pub const DeflateFast = struct { } } - if (@intCast(u32, next_emit) < src.len) { - emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..]); + if (@as(u32, @intCast(next_emit)) < src.len) { + emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..]); } - self.cur += @intCast(i32, src.len); - self.prev_len = @intCast(u32, src.len); + self.cur += @as(i32, @intCast(src.len)); + self.prev_len = @as(u32, @intCast(src.len)); @memcpy(self.prev[0..self.prev_len], src); return; } fn emitLiteral(dst: []token.Token, tokens_count: *u16, lit: []u8) void { for (lit) |v| { - dst[tokens_count.*] = token.literalToken(@intCast(u32, v)); + dst[tokens_count.*] = token.literalToken(@as(u32, 
@intCast(v))); tokens_count.* += 1; } return; @@ -253,60 +253,60 @@ pub const DeflateFast = struct { // t can be negative to indicate the match is starting in self.prev. // We assume that src[s-4 .. s] and src[t-4 .. t] already match. fn matchLen(self: *Self, s: i32, t: i32, src: []u8) i32 { - var s1 = @intCast(u32, s) + max_match_length - 4; + var s1 = @as(u32, @intCast(s)) + max_match_length - 4; if (s1 > src.len) { - s1 = @intCast(u32, src.len); + s1 = @as(u32, @intCast(src.len)); } // If we are inside the current block if (t >= 0) { - var b = src[@intCast(usize, t)..]; - var a = src[@intCast(usize, s)..@intCast(usize, s1)]; + var b = src[@as(usize, @intCast(t))..]; + var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))]; b = b[0..a.len]; // Extend the match to be as long as possible. for (a, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i); + return @as(i32, @intCast(i)); } } - return @intCast(i32, a.len); + return @as(i32, @intCast(a.len)); } // We found a match in the previous block. - var tp = @intCast(i32, self.prev_len) + t; + var tp = @as(i32, @intCast(self.prev_len)) + t; if (tp < 0) { return 0; } // Extend the match to be as long as possible. - var a = src[@intCast(usize, s)..@intCast(usize, s1)]; - var b = self.prev[@intCast(usize, tp)..@intCast(usize, self.prev_len)]; + var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))]; + var b = self.prev[@as(usize, @intCast(tp))..@as(usize, @intCast(self.prev_len))]; if (b.len > a.len) { b = b[0..a.len]; } a = a[0..b.len]; for (b, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i); + return @as(i32, @intCast(i)); } } // If we reached our limit, we matched everything we are // allowed to in the previous block and we return. - var n = @intCast(i32, b.len); - if (@intCast(u32, s + n) == s1) { + var n = @as(i32, @intCast(b.len)); + if (@as(u32, @intCast(s + n)) == s1) { return n; } // Continue looking for more matches in the current block. 
- a = src[@intCast(usize, s + n)..@intCast(usize, s1)]; + a = src[@as(usize, @intCast(s + n))..@as(usize, @intCast(s1))]; b = src[0..a.len]; for (a, 0..) |_, i| { if (a[i] != b[i]) { - return @intCast(i32, i) + n; + return @as(i32, @intCast(i)) + n; } } - return @intCast(i32, a.len) + n; + return @as(i32, @intCast(a.len)) + n; } // Reset resets the encoding history. @@ -574,7 +574,7 @@ test "best speed match 2/2" { var e = DeflateFast{ .prev = previous, - .prev_len = @intCast(u32, previous.len), + .prev_len = @as(u32, @intCast(previous.len)), .table = undefined, .allocator = undefined, .cur = 0, @@ -617,7 +617,7 @@ test "best speed shift offsets" { try expect(want_first_tokens > want_second_tokens); // Forward the current indicator to before wraparound. - enc.cur = buffer_reset - @intCast(i32, test_data.len); + enc.cur = buffer_reset - @as(i32, @intCast(test_data.len)); // Part 1 before wrap, should match clean state. tokens_count = 0; diff --git a/lib/std/compress/deflate/deflate_fast_test.zig b/lib/std/compress/deflate/deflate_fast_test.zig index 1c771d925ae1..08f6079aa5f6 100644 --- a/lib/std/compress/deflate/deflate_fast_test.zig +++ b/lib/std/compress/deflate/deflate_fast_test.zig @@ -19,7 +19,7 @@ test "best speed" { defer testing.allocator.free(abcabc); for (abcabc, 0..) |_, i| { - abcabc[i] = @intCast(u8, i % 128); + abcabc[i] = @as(u8, @intCast(i % 128)); } var tc_01 = [_]u32{ 65536, 0 }; @@ -119,16 +119,16 @@ test "best speed max match offset" { // zeros1 is between 0 and 30 zeros. // The difference between the two abc's will be offset, which // is max_match_offset plus or minus a small adjustment. 
- var src_len: usize = @intCast(usize, offset + @as(i32, abc.len) + @intCast(i32, extra)); + var src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra)))); var src = try testing.allocator.alloc(u8, src_len); defer testing.allocator.free(src); @memcpy(src[0..abc.len], abc); if (!do_match_before) { - const src_offset: usize = @intCast(usize, offset - @as(i32, xyz.len)); + const src_offset: usize = @as(usize, @intCast(offset - @as(i32, xyz.len))); @memcpy(src[src_offset..][0..xyz.len], xyz); } - const src_offset: usize = @intCast(usize, offset); + const src_offset: usize = @as(usize, @intCast(offset)); @memcpy(src[src_offset..][0..abc.len], abc); var compressed = ArrayList(u8).init(testing.allocator); diff --git a/lib/std/compress/deflate/dict_decoder.zig b/lib/std/compress/deflate/dict_decoder.zig index d9f240e7b4ad..75fdd359dd9f 100644 --- a/lib/std/compress/deflate/dict_decoder.zig +++ b/lib/std/compress/deflate/dict_decoder.zig @@ -49,7 +49,7 @@ pub const DictDecoder = struct { if (dict != null) { const src = dict.?[dict.?.len -| self.hist.len..]; @memcpy(self.hist[0..src.len], src); - self.wr_pos = @intCast(u32, dict.?.len); + self.wr_pos = @as(u32, @intCast(dict.?.len)); } if (self.wr_pos == self.hist.len) { @@ -66,7 +66,7 @@ pub const DictDecoder = struct { // Reports the total amount of historical data in the dictionary. pub fn histSize(self: *Self) u32 { if (self.full) { - return @intCast(u32, self.hist.len); + return @as(u32, @intCast(self.hist.len)); } return self.wr_pos; } @@ -78,7 +78,7 @@ pub const DictDecoder = struct { // Reports the available amount of output buffer space. pub fn availWrite(self: *Self) u32 { - return @intCast(u32, self.hist.len - self.wr_pos); + return @as(u32, @intCast(self.hist.len - self.wr_pos)); } // Returns a slice of the available buffer to write data to. 
@@ -110,10 +110,10 @@ pub const DictDecoder = struct { fn copy(dst: []u8, src: []const u8) u32 { if (src.len > dst.len) { mem.copyForwards(u8, dst, src[0..dst.len]); - return @intCast(u32, dst.len); + return @as(u32, @intCast(dst.len)); } mem.copyForwards(u8, dst[0..src.len], src); - return @intCast(u32, src.len); + return @as(u32, @intCast(src.len)); } // Copies a string at a given (dist, length) to the output. @@ -125,10 +125,10 @@ pub const DictDecoder = struct { assert(0 < dist and dist <= self.histSize()); var dst_base = self.wr_pos; var dst_pos = dst_base; - var src_pos: i32 = @intCast(i32, dst_pos) - @intCast(i32, dist); + var src_pos: i32 = @as(i32, @intCast(dst_pos)) - @as(i32, @intCast(dist)); var end_pos = dst_pos + length; if (end_pos > self.hist.len) { - end_pos = @intCast(u32, self.hist.len); + end_pos = @as(u32, @intCast(self.hist.len)); } // Copy non-overlapping section after destination position. @@ -139,8 +139,8 @@ pub const DictDecoder = struct { // Thus, a backwards copy is performed here; that is, the exact bytes in // the source prior to the copy is placed in the destination. 
if (src_pos < 0) { - src_pos += @intCast(i32, self.hist.len); - dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..]); + src_pos += @as(i32, @intCast(self.hist.len)); + dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..]); src_pos = 0; } @@ -160,7 +160,7 @@ pub const DictDecoder = struct { // dst_pos = end_pos; // while (dst_pos < end_pos) { - dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..dst_pos]); + dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..dst_pos]); } self.wr_pos = dst_pos; diff --git a/lib/std/compress/deflate/huffman_bit_writer.zig b/lib/std/compress/deflate/huffman_bit_writer.zig index a852287b538b..520443510659 100644 --- a/lib/std/compress/deflate/huffman_bit_writer.zig +++ b/lib/std/compress/deflate/huffman_bit_writer.zig @@ -107,7 +107,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } var n = self.nbytes; while (self.nbits != 0) { - self.bytes[n] = @truncate(u8, self.bits); + self.bytes[n] = @as(u8, @truncate(self.bits)); self.bits >>= 8; if (self.nbits > 8) { // Avoid underflow self.nbits -= 8; @@ -132,7 +132,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { if (self.err) { return; } - self.bits |= @intCast(u64, b) << @intCast(u6, self.nbits); + self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits)); self.nbits += nb; if (self.nbits >= 48) { var bits = self.bits; @@ -140,12 +140,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.nbits -= 48; var n = self.nbytes; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + 
bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n >= buffer_flush_size) { try self.write(self.bytes[0..n]); @@ -165,7 +165,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { return; } while (self.nbits != 0) { - self.bytes[n] = @truncate(u8, self.bits); + self.bytes[n] = @as(u8, @truncate(self.bits)); self.bits >>= 8; self.nbits -= 8; n += 1; @@ -209,12 +209,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { // Copy the concatenated code sizes to codegen. Put a marker at the end. var cgnl = codegen[0..num_literals]; for (cgnl, 0..) |_, i| { - cgnl[i] = @intCast(u8, lit_enc.codes[i].len); + cgnl[i] = @as(u8, @intCast(lit_enc.codes[i].len)); } cgnl = codegen[num_literals .. num_literals + num_offsets]; for (cgnl, 0..) |_, i| { - cgnl[i] = @intCast(u8, off_enc.codes[i].len); + cgnl[i] = @as(u8, @intCast(off_enc.codes[i].len)); } codegen[num_literals + num_offsets] = bad_code; @@ -243,7 +243,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } codegen[out_index] = 16; out_index += 1; - codegen[out_index] = @intCast(u8, n - 3); + codegen[out_index] = @as(u8, @intCast(n - 3)); out_index += 1; self.codegen_freq[16] += 1; count -= n; @@ -256,7 +256,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } codegen[out_index] = 18; out_index += 1; - codegen[out_index] = @intCast(u8, n - 11); + codegen[out_index] = @as(u8, @intCast(n - 11)); out_index += 1; self.codegen_freq[18] += 1; count -= n; @@ -265,7 +265,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { // 3 <= count <= 10 codegen[out_index] = 17; out_index += 1; - codegen[out_index] = @intCast(u8, count - 3); + codegen[out_index] = @as(u8, @intCast(count - 3)); out_index += 1; self.codegen_freq[17] += 1; count = 0; @@ -307,8 +307,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { extra_bits; return DynamicSize{ - .size = @intCast(u32, size), - 
.num_codegens = @intCast(u32, num_codegens), + .size = @as(u32, @intCast(size)), + .num_codegens = @as(u32, @intCast(num_codegens)), }; } @@ -328,7 +328,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { return .{ .size = 0, .storable = false }; } if (in.?.len <= deflate_const.max_store_block_size) { - return .{ .size = @intCast(u32, (in.?.len + 5) * 8), .storable = true }; + return .{ .size = @as(u32, @intCast((in.?.len + 5) * 8)), .storable = true }; } return .{ .size = 0, .storable = false }; } @@ -337,20 +337,20 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { if (self.err) { return; } - self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits); - self.nbits += @intCast(u32, c.len); + self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits)); + self.nbits += @as(u32, @intCast(c.len)); if (self.nbits >= 48) { var bits = self.bits; self.bits >>= 48; self.nbits -= 48; var n = self.nbytes; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n >= buffer_flush_size) { try self.write(self.bytes[0..n]); @@ -381,36 +381,36 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { first_bits = 5; } try self.writeBits(first_bits, 3); - try self.writeBits(@intCast(u32, num_literals - 257), 5); - try self.writeBits(@intCast(u32, num_offsets - 1), 5); - try self.writeBits(@intCast(u32, num_codegens - 4), 4); + try self.writeBits(@as(u32, @intCast(num_literals - 257)), 5); + try self.writeBits(@as(u32, @intCast(num_offsets - 1)), 5); + try 
self.writeBits(@as(u32, @intCast(num_codegens - 4)), 4); var i: u32 = 0; while (i < num_codegens) : (i += 1) { - var value = @intCast(u32, self.codegen_encoding.codes[codegen_order[i]].len); - try self.writeBits(@intCast(u32, value), 3); + var value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len)); + try self.writeBits(@as(u32, @intCast(value)), 3); } i = 0; while (true) { - var code_word: u32 = @intCast(u32, self.codegen[i]); + var code_word: u32 = @as(u32, @intCast(self.codegen[i])); i += 1; if (code_word == bad_code) { break; } - try self.writeCode(self.codegen_encoding.codes[@intCast(u32, code_word)]); + try self.writeCode(self.codegen_encoding.codes[@as(u32, @intCast(code_word))]); switch (code_word) { 16 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 2); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 2); i += 1; }, 17 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 3); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 3); i += 1; }, 18 => { - try self.writeBits(@intCast(u32, self.codegen[i]), 7); + try self.writeBits(@as(u32, @intCast(self.codegen[i])), 7); i += 1; }, else => {}, @@ -428,8 +428,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { } try self.writeBits(flag, 3); try self.flush(); - try self.writeBits(@intCast(u32, length), 16); - try self.writeBits(@intCast(u32, ~@intCast(u16, length)), 16); + try self.writeBits(@as(u32, @intCast(length)), 16); + try self.writeBits(@as(u32, @intCast(~@as(u16, @intCast(length)))), 16); } fn writeFixedHeader(self: *Self, is_eof: bool) Error!void { @@ -476,14 +476,14 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { var length_code: u32 = length_codes_start + 8; while (length_code < num_literals) : (length_code += 1) { // First eight length codes have extra size = 0. 
- extra_bits += @intCast(u32, self.literal_freq[length_code]) * - @intCast(u32, length_extra_bits[length_code - length_codes_start]); + extra_bits += @as(u32, @intCast(self.literal_freq[length_code])) * + @as(u32, @intCast(length_extra_bits[length_code - length_codes_start])); } var offset_code: u32 = 4; while (offset_code < num_offsets) : (offset_code += 1) { // First four offset codes have extra size = 0. - extra_bits += @intCast(u32, self.offset_freq[offset_code]) * - @intCast(u32, offset_extra_bits[offset_code]); + extra_bits += @as(u32, @intCast(self.offset_freq[offset_code])) * + @as(u32, @intCast(offset_extra_bits[offset_code])); } } @@ -621,12 +621,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.literal_freq[token.literal(deflate_const.end_block_marker)] += 1; // get the number of literals - num_literals = @intCast(u32, self.literal_freq.len); + num_literals = @as(u32, @intCast(self.literal_freq.len)); while (self.literal_freq[num_literals - 1] == 0) { num_literals -= 1; } // get the number of offsets - num_offsets = @intCast(u32, self.offset_freq.len); + num_offsets = @as(u32, @intCast(self.offset_freq.len)); while (num_offsets > 0 and self.offset_freq[num_offsets - 1] == 0) { num_offsets -= 1; } @@ -664,18 +664,18 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { var length = token.length(t); var length_code = token.lengthCode(length); try self.writeCode(le_codes[length_code + length_codes_start]); - var extra_length_bits = @intCast(u32, length_extra_bits[length_code]); + var extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code])); if (extra_length_bits > 0) { - var extra_length = @intCast(u32, length - length_base[length_code]); + var extra_length = @as(u32, @intCast(length - length_base[length_code])); try self.writeBits(extra_length, extra_length_bits); } // Write the offset var offset = token.offset(t); var offset_code = token.offsetCode(offset); try self.writeCode(oe_codes[offset_code]); - var 
extra_offset_bits = @intCast(u32, offset_extra_bits[offset_code]); + var extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code])); if (extra_offset_bits > 0) { - var extra_offset = @intCast(u32, offset - offset_base[offset_code]); + var extra_offset = @as(u32, @intCast(offset - offset_base[offset_code])); try self.writeBits(extra_offset, extra_offset_bits); } } @@ -742,8 +742,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { for (input) |t| { // Bitwriting inlined, ~30% speedup var c = encoding[t]; - self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits); - self.nbits += @intCast(u32, c.len); + self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits)); + self.nbits += @as(u32, @intCast(c.len)); if (self.nbits < 48) { continue; } @@ -752,12 +752,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type { self.bits >>= 48; self.nbits -= 48; var bytes = self.bytes[n..][0..6]; - bytes[0] = @truncate(u8, bits); - bytes[1] = @truncate(u8, bits >> 8); - bytes[2] = @truncate(u8, bits >> 16); - bytes[3] = @truncate(u8, bits >> 24); - bytes[4] = @truncate(u8, bits >> 32); - bytes[5] = @truncate(u8, bits >> 40); + bytes[0] = @as(u8, @truncate(bits)); + bytes[1] = @as(u8, @truncate(bits >> 8)); + bytes[2] = @as(u8, @truncate(bits >> 16)); + bytes[3] = @as(u8, @truncate(bits >> 24)); + bytes[4] = @as(u8, @truncate(bits >> 32)); + bytes[5] = @as(u8, @truncate(bits >> 40)); n += 6; if (n < buffer_flush_size) { continue; diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig index 689ac1441a95..4fea45f86313 100644 --- a/lib/std/compress/deflate/huffman_code.zig +++ b/lib/std/compress/deflate/huffman_code.zig @@ -73,7 +73,7 @@ pub const HuffmanEncoder = struct { // Set list to be the set of all non-zero literals and their frequencies for (freq, 0..) 
|f, i| { if (f != 0) { - list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f }; + list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f }; count += 1; } else { list[count] = LiteralNode{ .literal = 0x00, .freq = 0 }; @@ -88,7 +88,7 @@ pub const HuffmanEncoder = struct { // two or fewer literals, everything has bit length 1. for (list, 0..) |node, i| { // "list" is in order of increasing literal value. - self.codes[node.literal].set(@intCast(u16, i), 1); + self.codes[node.literal].set(@as(u16, @intCast(i)), 1); } return; } @@ -105,7 +105,7 @@ pub const HuffmanEncoder = struct { var total: u32 = 0; for (freq, 0..) |f, i| { if (f != 0) { - total += @intCast(u32, f) * @intCast(u32, self.codes[i].len); + total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len)); } } return total; @@ -167,7 +167,7 @@ pub const HuffmanEncoder = struct { } // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[max_bits].needed = 2 * @intCast(u32, n) - 4; + levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4; { var level = max_bits; @@ -267,19 +267,19 @@ pub const HuffmanEncoder = struct { // are encoded using "bits" bits, and get the values // code, code + 1, .... The code values are // assigned in literal order (not frequency order). - var chunk = list[list.len - @intCast(u32, bits) ..]; + var chunk = list[list.len - @as(u32, @intCast(bits)) ..]; self.lns = chunk; mem.sort(LiteralNode, self.lns, {}, byLiteral); for (chunk) |node| { self.codes[node.literal] = HuffCode{ - .code = bu.bitReverse(u16, code, @intCast(u5, n)), - .len = @intCast(u16, n), + .code = bu.bitReverse(u16, code, @as(u5, @intCast(n))), + .len = @as(u16, @intCast(n)), }; code += 1; } - list = list[0 .. list.len - @intCast(u32, bits)]; + list = list[0 .. 
list.len - @as(u32, @intCast(bits))]; } } }; @@ -332,7 +332,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder { size = 8; }, } - codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @intCast(u5, size)), .len = size }; + codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size }; } return h; } @@ -341,7 +341,7 @@ pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder { var h = try newHuffmanEncoder(allocator, 30); var codes = h.codes; for (codes, 0..) |_, ch| { - codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 }; + codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 }; } return h; } diff --git a/lib/std/compress/deflate/token.zig b/lib/std/compress/deflate/token.zig index d0e9a23647bb..744fcdeb12ea 100644 --- a/lib/std/compress/deflate/token.zig +++ b/lib/std/compress/deflate/token.zig @@ -70,16 +70,16 @@ pub fn matchToken(xlength: u32, xoffset: u32) Token { // Returns the literal of a literal token pub fn literal(t: Token) u32 { - return @intCast(u32, t - literal_type); + return @as(u32, @intCast(t - literal_type)); } // Returns the extra offset of a match token pub fn offset(t: Token) u32 { - return @intCast(u32, t) & offset_mask; + return @as(u32, @intCast(t)) & offset_mask; } pub fn length(t: Token) u32 { - return @intCast(u32, (t - match_type) >> length_shift); + return @as(u32, @intCast((t - match_type) >> length_shift)); } pub fn lengthCode(len: u32) u32 { @@ -88,10 +88,10 @@ pub fn lengthCode(len: u32) u32 { // Returns the offset code corresponding to a specific offset pub fn offsetCode(off: u32) u32 { - if (off < @intCast(u32, offset_codes.len)) { + if (off < @as(u32, @intCast(offset_codes.len))) { return offset_codes[off]; } - if (off >> 7 < @intCast(u32, offset_codes.len)) { + if (off >> 7 < @as(u32, @intCast(offset_codes.len))) { return offset_codes[off >> 7] + 14; } return offset_codes[off >> 14] + 28; 
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig index 7e9fea6814fa..f6fb038ae3cf 100644 --- a/lib/std/compress/gzip.zig +++ b/lib/std/compress/gzip.zig @@ -89,7 +89,7 @@ pub fn Decompress(comptime ReaderType: type) type { if (FLG & FHCRC != 0) { const hash = try source.readIntLittle(u16); - if (hash != @truncate(u16, hasher.hasher.final())) + if (hash != @as(u16, @truncate(hasher.hasher.final()))) return error.WrongChecksum; } diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index a6adb941a498..0dae9281e853 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -52,11 +52,11 @@ pub const Params = struct { return error.CorruptInput; } - const lc = @intCast(u4, props % 9); + const lc = @as(u4, @intCast(props % 9)); props /= 9; - const lp = @intCast(u3, props % 5); + const lp = @as(u3, @intCast(props % 5)); props /= 5; - const pb = @intCast(u3, props); + const pb = @as(u3, @intCast(props)); const dict_size_provided = try reader.readIntLittle(u32); const dict_size = @max(0x1000, dict_size_provided); @@ -342,7 +342,7 @@ pub const DecoderState = struct { result = (result << 1) ^ @intFromBool(try decoder.decodeBit(reader, &probs[result], update)); } - return @truncate(u8, result - 0x100); + return @as(u8, @truncate(result - 0x100)); } fn decodeDistance( @@ -358,7 +358,7 @@ pub const DecoderState = struct { if (pos_slot < 4) return pos_slot; - const num_direct_bits = @intCast(u5, (pos_slot >> 1) - 1); + const num_direct_bits = @as(u5, @intCast((pos_slot >> 1) - 1)); var result = (2 ^ (pos_slot & 1)) << num_direct_bits; if (pos_slot < 14) { diff --git a/lib/std/compress/lzma2/decode.zig b/lib/std/compress/lzma2/decode.zig index 7297a1a51b4f..a23007d42ad6 100644 --- a/lib/std/compress/lzma2/decode.zig +++ b/lib/std/compress/lzma2/decode.zig @@ -119,11 +119,11 @@ pub const Decoder = struct { return error.CorruptInput; } - const lc = @intCast(u4, props % 9); + const lc = @as(u4, @intCast(props 
% 9)); props /= 9; - const lp = @intCast(u3, props % 5); + const lp = @as(u3, @intCast(props % 5)); props /= 5; - const pb = @intCast(u3, props); + const pb = @as(u3, @intCast(props)); if (lc + lp > 4) { return error.CorruptInput; diff --git a/lib/std/compress/xz.zig b/lib/std/compress/xz.zig index 5debc81835ef..3ceec90a7a1e 100644 --- a/lib/std/compress/xz.zig +++ b/lib/std/compress/xz.zig @@ -18,7 +18,7 @@ fn readStreamFlags(reader: anytype, check: *Check) !void { if (reserved1 != 0) return error.CorruptInput; - check.* = @enumFromInt(Check, try bit_reader.readBitsNoEof(u4, 4)); + check.* = @as(Check, @enumFromInt(try bit_reader.readBitsNoEof(u4, 4))); const reserved2 = try bit_reader.readBitsNoEof(u4, 4); if (reserved2 != 0) diff --git a/lib/std/compress/xz/block.zig b/lib/std/compress/xz/block.zig index 2a034011c2b0..6f4fad1c7f52 100644 --- a/lib/std/compress/xz/block.zig +++ b/lib/std/compress/xz/block.zig @@ -108,7 +108,7 @@ pub fn Decoder(comptime ReaderType: type) type { has_unpacked_size: bool, }; - const flags = @bitCast(Flags, try header_reader.readByte()); + const flags = @as(Flags, @bitCast(try header_reader.readByte())); const filter_count = @as(u3, flags.last_filter_index) + 1; if (filter_count > 1) return error.Unsupported; @@ -124,9 +124,9 @@ pub fn Decoder(comptime ReaderType: type) type { _, }; - const filter_id = @enumFromInt( + const filter_id = @as( FilterId, - try std.leb.readULEB128(u64, header_reader), + @enumFromInt(try std.leb.readULEB128(u64, header_reader)), ); if (@intFromEnum(filter_id) >= 0x4000_0000_0000_0000) diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig index 98cabb473262..5580192537f1 100644 --- a/lib/std/compress/zlib.zig +++ b/lib/std/compress/zlib.zig @@ -41,7 +41,7 @@ pub fn DecompressStream(comptime ReaderType: type) type { // verify the header checksum if (header_u16 % 31 != 0) return error.BadHeader; - const header = @bitCast(ZLibHeader, header_u16); + const header = @as(ZLibHeader, 
@bitCast(header_u16)); // The CM field must be 8 to indicate the use of DEFLATE if (header.compression_method != ZLibHeader.DEFLATE) @@ -130,9 +130,9 @@ pub fn CompressStream(comptime WriterType: type) type { .preset_dict = 0, .checksum = 0, }; - header.checksum = @truncate(u5, 31 - @bitCast(u16, header) % 31); + header.checksum = @as(u5, @truncate(31 - @as(u16, @bitCast(header)) % 31)); - try dest.writeIntBig(u16, @bitCast(u16, header)); + try dest.writeIntBig(u16, @as(u16, @bitCast(header))); const compression_level: deflate.Compression = switch (options.level) { .no_compression => .no_compression, diff --git a/lib/std/compress/zstandard/decode/block.zig b/lib/std/compress/zstandard/decode/block.zig index 40f5903a2491..bbf8492f04b2 100644 --- a/lib/std/compress/zstandard/decode/block.zig +++ b/lib/std/compress/zstandard/decode/block.zig @@ -894,7 +894,7 @@ pub fn decodeBlockReader( /// Decode the header of a block. pub fn decodeBlockHeader(src: *const [3]u8) frame.Zstandard.Block.Header { const last_block = src[0] & 1 == 1; - const block_type = @enumFromInt(frame.Zstandard.Block.Type, (src[0] & 0b110) >> 1); + const block_type = @as(frame.Zstandard.Block.Type, @enumFromInt((src[0] & 0b110) >> 1)); const block_size = ((src[0] & 0b11111000) >> 3) + (@as(u21, src[1]) << 5) + (@as(u21, src[2]) << 13); return .{ .last_block = last_block, @@ -1008,7 +1008,7 @@ pub fn decodeLiteralsSection( try huffman.decodeHuffmanTree(counting_reader.reader(), buffer) else null; - const huffman_tree_size = @intCast(usize, counting_reader.bytes_read); + const huffman_tree_size = @as(usize, @intCast(counting_reader.bytes_read)); const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch return error.MalformedLiteralsSection; @@ -1058,8 +1058,8 @@ fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Stre /// - `error.EndOfStream` if there are not enough bytes in `source` pub fn decodeLiteralsHeader(source: anytype) 
!LiteralsSection.Header { const byte0 = try source.readByte(); - const block_type = @enumFromInt(LiteralsSection.BlockType, byte0 & 0b11); - const size_format = @intCast(u2, (byte0 & 0b1100) >> 2); + const block_type = @as(LiteralsSection.BlockType, @enumFromInt(byte0 & 0b11)); + const size_format = @as(u2, @intCast((byte0 & 0b1100) >> 2)); var regenerated_size: u20 = undefined; var compressed_size: ?u18 = null; switch (block_type) { @@ -1132,9 +1132,9 @@ pub fn decodeSequencesHeader( const compression_modes = try source.readByte(); - const matches_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00001100) >> 2); - const offsets_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00110000) >> 4); - const literal_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b11000000) >> 6); + const matches_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00001100) >> 2)); + const offsets_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00110000) >> 4)); + const literal_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b11000000) >> 6)); if (compression_modes & 0b11 != 0) return error.ReservedBitSet; return SequencesSection.Header{ diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig index 232af39ccfe5..6e987f9c6fb8 100644 --- a/lib/std/compress/zstandard/decode/fse.zig +++ b/lib/std/compress/zstandard/decode/fse.zig @@ -69,7 +69,7 @@ pub fn decodeFseTable( } fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { - const total_probability = @intCast(u16, entries.len); + const total_probability = @as(u16, @intCast(entries.len)); const accuracy_log = std.math.log2_int(u16, total_probability); assert(total_probability <= 1 << 9); @@ -77,7 +77,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { for (values, 0..) 
|value, i| { if (value == 0) { entries[entries.len - 1 - less_than_one_count] = Table.Fse{ - .symbol = @intCast(u8, i), + .symbol = @as(u8, @intCast(i)), .baseline = 0, .bits = accuracy_log, }; @@ -99,7 +99,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { const share_size_log = std.math.log2_int(u16, share_size); for (0..probability) |i| { - temp_states[i] = @intCast(u16, position); + temp_states[i] = @as(u16, @intCast(position)); position += (entries.len >> 1) + (entries.len >> 3) + 3; position &= entries.len - 1; while (position >= entries.len - less_than_one_count) { @@ -110,13 +110,13 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16)); for (0..probability) |i| { entries[temp_states[i]] = if (i < double_state_count) Table.Fse{ - .symbol = @intCast(u8, symbol), + .symbol = @as(u8, @intCast(symbol)), .bits = share_size_log + 1, - .baseline = single_state_count * share_size + @intCast(u16, i) * 2 * share_size, + .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size, } else Table.Fse{ - .symbol = @intCast(u8, symbol), + .symbol = @as(u8, @intCast(symbol)), .bits = share_size_log, - .baseline = (@intCast(u16, i) - double_state_count) * share_size, + .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size, }; } } diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig index f5e977d0dadd..13fb1ac5f256 100644 --- a/lib/std/compress/zstandard/decode/huffman.zig +++ b/lib/std/compress/zstandard/decode/huffman.zig @@ -109,8 +109,8 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights const weights_byte_count = (encoded_symbol_count + 1) / 2; for (0..weights_byte_count) |i| { const byte = try source.readByte(); - weights[2 * i] = @intCast(u4, byte >> 4); - weights[2 * i + 1] = @intCast(u4, byte & 0xF); + weights[2 * i] = @as(u4, 
@intCast(byte >> 4)); + weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF)); } return encoded_symbol_count + 1; } @@ -118,7 +118,7 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize { for (0..weight_sorted_prefixed_symbols.len) |i| { weight_sorted_prefixed_symbols[i] = .{ - .symbol = @intCast(u8, i), + .symbol = @as(u8, @intCast(i)), .weight = undefined, .prefix = undefined, }; @@ -167,7 +167,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm weight_power_sum_big += (@as(u16, 1) << value) >> 1; } if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree; - const weight_power_sum = @intCast(u16, weight_power_sum_big); + const weight_power_sum = @as(u16, @intCast(weight_power_sum_big)); // advance to next power of two (even if weight_power_sum is a power of 2) // TODO: is it valid to have weight_power_sum == 0? 
@@ -179,7 +179,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*); const tree = LiteralsSection.HuffmanTree{ .max_bit_count = max_number_of_bits, - .symbol_count_minus_one = @intCast(u8, prefixed_symbol_count - 1), + .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)), .nodes = weight_sorted_prefixed_symbols, }; return tree; diff --git a/lib/std/compress/zstandard/decompress.zig b/lib/std/compress/zstandard/decompress.zig index a2ba59e6887b..bc977d1fba21 100644 --- a/lib/std/compress/zstandard/decompress.zig +++ b/lib/std/compress/zstandard/decompress.zig @@ -260,7 +260,7 @@ pub fn decodeFrameArrayList( /// Returns the frame checksum corresponding to the data fed into `hasher` pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 { const hash = hasher.final(); - return @intCast(u32, hash & 0xFFFFFFFF); + return @as(u32, @intCast(hash & 0xFFFFFFFF)); } const FrameError = error{ @@ -398,7 +398,7 @@ pub const FrameContext = struct { const window_size = if (window_size_raw > window_size_max) return error.WindowTooLarge else - @intCast(usize, window_size_raw); + @as(usize, @intCast(window_size_raw)); const should_compute_checksum = frame_header.descriptor.content_checksum_flag and verify_checksum; @@ -585,7 +585,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 { const exponent = (descriptor & 0b11111000) >> 3; const mantissa = descriptor & 0b00000111; const window_log = 10 + exponent; - const window_base = @as(u64, 1) << @intCast(u6, window_log); + const window_base = @as(u64, 1) << @as(u6, @intCast(window_log)); const window_add = (window_base / 8) * mantissa; return window_base + window_add; } else return header.content_size; @@ -599,7 +599,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 { pub fn decodeZstandardHeader( source: anytype, ) (@TypeOf(source).Error || error{ EndOfStream, 
ReservedBitSet })!ZstandardHeader { - const descriptor = @bitCast(ZstandardHeader.Descriptor, try source.readByte()); + const descriptor = @as(ZstandardHeader.Descriptor, @bitCast(try source.readByte())); if (descriptor.reserved) return error.ReservedBitSet; diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig index f5938dd21806..7c3343ba8cd2 100644 --- a/lib/std/crypto/25519/curve25519.zig +++ b/lib/std/crypto/25519/curve25519.zig @@ -54,7 +54,7 @@ pub const Curve25519 = struct { var swap: u8 = 0; var pos: usize = bits - 1; while (true) : (pos -= 1) { - const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 1; + const bit = (s[pos >> 3] >> @as(u3, @truncate(pos))) & 1; swap ^= bit; Fe.cSwap2(&x2, &x3, &z2, &z3, swap); swap = bit; diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig index 50f34c45f37f..bf0c62f9def9 100644 --- a/lib/std/crypto/25519/edwards25519.zig +++ b/lib/std/crypto/25519/edwards25519.zig @@ -162,8 +162,8 @@ pub const Edwards25519 = struct { const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s); var e: [2 * 32]i8 = undefined; for (reduced, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -190,9 +190,9 @@ pub const Edwards25519 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -206,7 +206,7 @@ pub const Edwards25519 = struct { var q = Edwards25519.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -283,15 +283,15 @@ pub const Edwards25519 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -326,9 +326,9 @@ pub const Edwards25519 = struct { for (es, 0..) 
|e, i| { const slot = e[pos]; if (slot > 0) { - q = q.add(pcs[i][@intCast(usize, slot)]); + q = q.add(pcs[i][@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pcs[i][@intCast(usize, -slot)]); + q = q.sub(pcs[i][@as(usize, @intCast(-slot))]); } } if (pos == 0) break; @@ -427,7 +427,7 @@ pub const Edwards25519 = struct { } const empty_block = [_]u8{0} ** H.block_length; var t = [3]u8{ 0, n * h_l, 0 }; - var xctx_len_u8 = [1]u8{@intCast(u8, xctx.len)}; + var xctx_len_u8 = [1]u8{@as(u8, @intCast(xctx.len))}; var st = H.init(.{}); st.update(empty_block[0..]); st.update(s); diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig index eec83f3d2e9d..627df9d4cbfc 100644 --- a/lib/std/crypto/25519/field.zig +++ b/lib/std/crypto/25519/field.zig @@ -254,11 +254,11 @@ pub const Fe = struct { var rs: [5]u64 = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { - rs[i] = @truncate(u64, r[i]) & MASK51; - r[i + 1] += @intCast(u64, r[i] >> 51); + rs[i] = @as(u64, @truncate(r[i])) & MASK51; + r[i + 1] += @as(u64, @intCast(r[i] >> 51)); } - rs[4] = @truncate(u64, r[4]) & MASK51; - var carry = @intCast(u64, r[4] >> 51); + rs[4] = @as(u64, @truncate(r[4])) & MASK51; + var carry = @as(u64, @intCast(r[4] >> 51)); rs[0] += 19 * carry; carry = rs[0] >> 51; rs[0] &= MASK51; @@ -278,8 +278,8 @@ pub const Fe = struct { var r: [5]u128 = undefined; comptime var i = 0; inline while (i < 5) : (i += 1) { - ax[i] = @intCast(u128, a.limbs[i]); - bx[i] = @intCast(u128, b.limbs[i]); + ax[i] = @as(u128, @intCast(a.limbs[i])); + bx[i] = @as(u128, @intCast(b.limbs[i])); } i = 1; inline while (i < 5) : (i += 1) { @@ -299,7 +299,7 @@ pub const Fe = struct { var r: [5]u128 = undefined; comptime var i = 0; inline while (i < 5) : (i += 1) { - ax[i] = @intCast(u128, a.limbs[i]); + ax[i] = @as(u128, @intCast(a.limbs[i])); } const a0_2 = 2 * ax[0]; const a1_2 = 2 * ax[1]; @@ -334,15 +334,15 @@ pub const Fe = struct { /// Multiply a field element with a small 
(32-bit) integer pub inline fn mul32(a: Fe, comptime n: u32) Fe { - const sn = @intCast(u128, n); + const sn = @as(u128, @intCast(n)); var fe: Fe = undefined; var x: u128 = 0; comptime var i = 0; inline while (i < 5) : (i += 1) { x = a.limbs[i] * sn + (x >> 51); - fe.limbs[i] = @truncate(u64, x) & MASK51; + fe.limbs[i] = @as(u64, @truncate(x)) & MASK51; } - fe.limbs[0] += @intCast(u64, x >> 51) * 19; + fe.limbs[0] += @as(u64, @intCast(x >> 51)) * 19; return fe; } @@ -402,7 +402,7 @@ pub const Fe = struct { const t2 = t.sqn(30).mul(t); const t3 = t2.sqn(60).mul(t2); const t4 = t3.sqn(120).mul(t3).sqn(10).mul(u).sqn(3).mul(_11).sq(); - return @bitCast(bool, @truncate(u1, ~(t4.toBytes()[1] & 1))); + return @as(bool, @bitCast(@as(u1, @truncate(~(t4.toBytes()[1] & 1))))); } fn uncheckedSqrt(x2: Fe) Fe { diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig index fd6d42aebe2e..1699c68e124b 100644 --- a/lib/std/crypto/25519/scalar.zig +++ b/lib/std/crypto/25519/scalar.zig @@ -27,8 +27,8 @@ pub fn rejectNonCanonical(s: CompressedScalar) NonCanonicalError!void { while (true) : (i -= 1) { const xs = @as(u16, s[i]); const xfield_order_s = @as(u16, field_order_s[i]); - c |= @intCast(u8, ((xs -% xfield_order_s) >> 8) & n); - n &= @intCast(u8, ((xs ^ xfield_order_s) -% 1) >> 8); + c |= @as(u8, @intCast(((xs -% xfield_order_s) >> 8) & n)); + n &= @as(u8, @intCast(((xs ^ xfield_order_s) -% 1) >> 8)); if (i == 0) break; } if (c == 0) { @@ -89,7 +89,7 @@ pub fn neg(s: CompressedScalar) CompressedScalar { var i: usize = 0; while (i < 64) : (i += 1) { carry = @as(u32, fs[i]) -% sx[i] -% @as(u32, carry); - sx[i] = @truncate(u8, carry); + sx[i] = @as(u8, @truncate(carry)); carry = (carry >> 8) & 1; } return reduce64(sx); @@ -129,7 +129,7 @@ pub const Scalar = struct { while (i < 4) : (i += 1) { mem.writeIntLittle(u64, bytes[i * 7 ..][0..8], expanded.limbs[i]); } - mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @intCast(u32, expanded.limbs[i])); + 
mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @as(u32, @intCast(expanded.limbs[i]))); return bytes; } @@ -234,42 +234,42 @@ pub const Scalar = struct { const z80 = xy440; const carry0 = z00 >> 56; - const t10 = @truncate(u64, z00) & 0xffffffffffffff; + const t10 = @as(u64, @truncate(z00)) & 0xffffffffffffff; const c00 = carry0; const t00 = t10; const carry1 = (z10 + c00) >> 56; - const t11 = @truncate(u64, (z10 + c00)) & 0xffffffffffffff; + const t11 = @as(u64, @truncate((z10 + c00))) & 0xffffffffffffff; const c10 = carry1; const t12 = t11; const carry2 = (z20 + c10) >> 56; - const t13 = @truncate(u64, (z20 + c10)) & 0xffffffffffffff; + const t13 = @as(u64, @truncate((z20 + c10))) & 0xffffffffffffff; const c20 = carry2; const t20 = t13; const carry3 = (z30 + c20) >> 56; - const t14 = @truncate(u64, (z30 + c20)) & 0xffffffffffffff; + const t14 = @as(u64, @truncate((z30 + c20))) & 0xffffffffffffff; const c30 = carry3; const t30 = t14; const carry4 = (z40 + c30) >> 56; - const t15 = @truncate(u64, (z40 + c30)) & 0xffffffffffffff; + const t15 = @as(u64, @truncate((z40 + c30))) & 0xffffffffffffff; const c40 = carry4; const t40 = t15; const carry5 = (z50 + c40) >> 56; - const t16 = @truncate(u64, (z50 + c40)) & 0xffffffffffffff; + const t16 = @as(u64, @truncate((z50 + c40))) & 0xffffffffffffff; const c50 = carry5; const t50 = t16; const carry6 = (z60 + c50) >> 56; - const t17 = @truncate(u64, (z60 + c50)) & 0xffffffffffffff; + const t17 = @as(u64, @truncate((z60 + c50))) & 0xffffffffffffff; const c60 = carry6; const t60 = t17; const carry7 = (z70 + c60) >> 56; - const t18 = @truncate(u64, (z70 + c60)) & 0xffffffffffffff; + const t18 = @as(u64, @truncate((z70 + c60))) & 0xffffffffffffff; const c70 = carry7; const t70 = t18; const carry8 = (z80 + c70) >> 56; - const t19 = @truncate(u64, (z80 + c70)) & 0xffffffffffffff; + const t19 = @as(u64, @truncate((z80 + c70))) & 0xffffffffffffff; const c80 = carry8; const t80 = t19; - const t90 = (@truncate(u64, c80)); + const t90 = 
(@as(u64, @truncate(c80))); const r0 = t00; const r1 = t12; const r2 = t20; @@ -356,26 +356,26 @@ pub const Scalar = struct { const carry12 = (z32 + c21) >> 56; const c31 = carry12; const carry13 = (z42 + c31) >> 56; - const t24 = @truncate(u64, z42 + c31) & 0xffffffffffffff; + const t24 = @as(u64, @truncate(z42 + c31)) & 0xffffffffffffff; const c41 = carry13; const t41 = t24; const carry14 = (z5 + c41) >> 56; - const t25 = @truncate(u64, z5 + c41) & 0xffffffffffffff; + const t25 = @as(u64, @truncate(z5 + c41)) & 0xffffffffffffff; const c5 = carry14; const t5 = t25; const carry15 = (z6 + c5) >> 56; - const t26 = @truncate(u64, z6 + c5) & 0xffffffffffffff; + const t26 = @as(u64, @truncate(z6 + c5)) & 0xffffffffffffff; const c6 = carry15; const t6 = t26; const carry16 = (z7 + c6) >> 56; - const t27 = @truncate(u64, z7 + c6) & 0xffffffffffffff; + const t27 = @as(u64, @truncate(z7 + c6)) & 0xffffffffffffff; const c7 = carry16; const t7 = t27; const carry17 = (z8 + c7) >> 56; - const t28 = @truncate(u64, z8 + c7) & 0xffffffffffffff; + const t28 = @as(u64, @truncate(z8 + c7)) & 0xffffffffffffff; const c8 = carry17; const t8 = t28; - const t9 = @truncate(u64, c8); + const t9 = @as(u64, @truncate(c8)); const qmu4_ = t41; const qmu5_ = t5; @@ -425,22 +425,22 @@ pub const Scalar = struct { const xy31 = @as(u128, qdiv3) * @as(u128, m1); const xy40 = @as(u128, qdiv4) * @as(u128, m0); const carry18 = xy00 >> 56; - const t29 = @truncate(u64, xy00) & 0xffffffffffffff; + const t29 = @as(u64, @truncate(xy00)) & 0xffffffffffffff; const c0 = carry18; const t01 = t29; const carry19 = (xy01 + xy10 + c0) >> 56; - const t31 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff; + const t31 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff; const c12 = carry19; const t110 = t31; const carry20 = (xy02 + xy11 + xy20 + c12) >> 56; - const t32 = @truncate(u64, xy02 + xy11 + xy20 + c12) & 0xffffffffffffff; + const t32 = @as(u64, @truncate(xy02 + xy11 + xy20 + c12)) & 0xffffffffffffff; 
const c22 = carry20; const t210 = t32; const carry = (xy03 + xy12 + xy21 + xy30 + c22) >> 56; - const t33 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c22) & 0xffffffffffffff; + const t33 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c22)) & 0xffffffffffffff; const c32 = carry; const t34 = t33; - const t42 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c32) & 0xffffffffff; + const t42 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c32)) & 0xffffffffff; const qmul0 = t01; const qmul1 = t110; @@ -498,7 +498,7 @@ pub const Scalar = struct { const t = ((b << 56) + s4) -% (y41 + b3); const b4 = b; const t4 = t; - const mask = (b4 -% @intCast(u64, ((1)))); + const mask = (b4 -% @as(u64, @intCast(((1))))); const z04 = s0 ^ (mask & (s0 ^ t0)); const z14 = s1 ^ (mask & (s1 ^ t1)); const z24 = s2 ^ (mask & (s2 ^ t2)); @@ -691,26 +691,26 @@ const ScalarDouble = struct { const carry3 = (z31 + c20) >> 56; const c30 = carry3; const carry4 = (z41 + c30) >> 56; - const t103 = @as(u64, @truncate(u64, z41 + c30)) & 0xffffffffffffff; + const t103 = @as(u64, @as(u64, @truncate(z41 + c30))) & 0xffffffffffffff; const c40 = carry4; const t410 = t103; const carry5 = (z5 + c40) >> 56; - const t104 = @as(u64, @truncate(u64, z5 + c40)) & 0xffffffffffffff; + const t104 = @as(u64, @as(u64, @truncate(z5 + c40))) & 0xffffffffffffff; const c5 = carry5; const t51 = t104; const carry6 = (z6 + c5) >> 56; - const t105 = @as(u64, @truncate(u64, z6 + c5)) & 0xffffffffffffff; + const t105 = @as(u64, @as(u64, @truncate(z6 + c5))) & 0xffffffffffffff; const c6 = carry6; const t61 = t105; const carry7 = (z7 + c6) >> 56; - const t106 = @as(u64, @truncate(u64, z7 + c6)) & 0xffffffffffffff; + const t106 = @as(u64, @as(u64, @truncate(z7 + c6))) & 0xffffffffffffff; const c7 = carry7; const t71 = t106; const carry8 = (z8 + c7) >> 56; - const t107 = @as(u64, @truncate(u64, z8 + c7)) & 0xffffffffffffff; + const t107 = @as(u64, @as(u64, @truncate(z8 + c7))) & 0xffffffffffffff; const c8 = carry8; 
const t81 = t107; - const t91 = @as(u64, @truncate(u64, c8)); + const t91 = @as(u64, @as(u64, @truncate(c8))); const qmu4_ = t410; const qmu5_ = t51; @@ -760,22 +760,22 @@ const ScalarDouble = struct { const xy31 = @as(u128, qdiv3) * @as(u128, m1); const xy40 = @as(u128, qdiv4) * @as(u128, m0); const carry9 = xy00 >> 56; - const t108 = @truncate(u64, xy00) & 0xffffffffffffff; + const t108 = @as(u64, @truncate(xy00)) & 0xffffffffffffff; const c0 = carry9; const t010 = t108; const carry10 = (xy01 + xy10 + c0) >> 56; - const t109 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff; + const t109 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff; const c11 = carry10; const t110 = t109; const carry11 = (xy02 + xy11 + xy20 + c11) >> 56; - const t1010 = @truncate(u64, xy02 + xy11 + xy20 + c11) & 0xffffffffffffff; + const t1010 = @as(u64, @truncate(xy02 + xy11 + xy20 + c11)) & 0xffffffffffffff; const c21 = carry11; const t210 = t1010; const carry = (xy03 + xy12 + xy21 + xy30 + c21) >> 56; - const t1011 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c21) & 0xffffffffffffff; + const t1011 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c21)) & 0xffffffffffffff; const c31 = carry; const t310 = t1011; - const t411 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c31) & 0xffffffffff; + const t411 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c31)) & 0xffffffffff; const qmul0 = t010; const qmul1 = t110; diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig index 51eb97ab3244..a4f0ff604b6c 100644 --- a/lib/std/crypto/Certificate.zig +++ b/lib/std/crypto/Certificate.zig @@ -312,7 +312,7 @@ pub const Parsed = struct { while (name_i < general_names.slice.end) { const general_name = try der.Element.parse(subject_alt_name, name_i); name_i = general_name.slice.end; - switch (@enumFromInt(GeneralNameTag, @intFromEnum(general_name.identifier.tag))) { + switch (@as(GeneralNameTag, @enumFromInt(@intFromEnum(general_name.identifier.tag)))) { 
.dNSName => { const dns_name = subject_alt_name[general_name.slice.start..general_name.slice.end]; if (checkHostName(host_name, dns_name)) return; @@ -379,7 +379,7 @@ pub fn parse(cert: Certificate) ParseError!Parsed { const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start); const version_elem = try der.Element.parse(cert_bytes, tbs_certificate.slice.start); const version = try parseVersion(cert_bytes, version_elem); - const serial_number = if (@bitCast(u8, version_elem.identifier) == 0xa0) + const serial_number = if (@as(u8, @bitCast(version_elem.identifier)) == 0xa0) try der.Element.parse(cert_bytes, version_elem.slice.end) else version_elem; @@ -597,8 +597,8 @@ const Date = struct { var month: u4 = 1; while (month < date.month) : (month += 1) { const days: u64 = std.time.epoch.getDaysInMonth( - @enumFromInt(std.time.epoch.YearLeapKind, @intFromBool(is_leap)), - @enumFromInt(std.time.epoch.Month, month), + @as(std.time.epoch.YearLeapKind, @enumFromInt(@intFromBool(is_leap))), + @as(std.time.epoch.Month, @enumFromInt(month)), ); sec += days * std.time.epoch.secs_per_day; } @@ -685,7 +685,7 @@ fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) ParseEnu pub const ParseVersionError = error{ UnsupportedCertificateVersion, CertificateFieldHasInvalidLength }; pub fn parseVersion(bytes: []const u8, version_elem: der.Element) ParseVersionError!Version { - if (@bitCast(u8, version_elem.identifier) != 0xa0) + if (@as(u8, @bitCast(version_elem.identifier)) != 0xa0) return .v1; if (version_elem.slice.end - version_elem.slice.start != 3) @@ -864,7 +864,7 @@ pub const der = struct { pub fn parse(bytes: []const u8, index: u32) ParseElementError!Element { var i = index; - const identifier = @bitCast(Identifier, bytes[i]); + const identifier = @as(Identifier, @bitCast(bytes[i])); i += 1; const size_byte = bytes[i]; i += 1; @@ -878,7 +878,7 @@ pub const der = struct { }; } - const len_size = @truncate(u7, size_byte); + const len_size 
= @as(u7, @truncate(size_byte)); if (len_size > @sizeOf(u32)) { return error.CertificateFieldHasInvalidLength; } @@ -1042,10 +1042,10 @@ pub const rsa = struct { var hashed: [Hash.digest_length]u8 = undefined; while (idx < len) { - c[0] = @intCast(u8, (counter >> 24) & 0xFF); - c[1] = @intCast(u8, (counter >> 16) & 0xFF); - c[2] = @intCast(u8, (counter >> 8) & 0xFF); - c[3] = @intCast(u8, counter & 0xFF); + c[0] = @as(u8, @intCast((counter >> 24) & 0xFF)); + c[1] = @as(u8, @intCast((counter >> 16) & 0xFF)); + c[2] = @as(u8, @intCast((counter >> 8) & 0xFF)); + c[3] = @as(u8, @intCast(counter & 0xFF)); std.mem.copyForwards(u8, hash[seed.len..], &c); Hash.hash(&hash, &hashed, .{}); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index 434de6e0a830..2a5555e301f6 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -131,7 +131,7 @@ pub fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void { var ctx = w.crypt32.CertEnumCertificatesInStore(store, null); while (ctx) |context| : (ctx = w.crypt32.CertEnumCertificatesInStore(store, ctx)) { - const decoded_start = @intCast(u32, cb.bytes.items.len); + const decoded_start = @as(u32, @intCast(cb.bytes.items.len)); const encoded_cert = context.pbCertEncoded[0..context.cbCertEncoded]; try cb.bytes.appendSlice(gpa, encoded_cert); try cb.parseCert(gpa, decoded_start, now_sec); @@ -213,7 +213,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom const needed_capacity = std.math.cast(u32, decoded_size_upper_bound + size) orelse return error.CertificateAuthorityBundleTooBig; try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity); - const end_reserved = @intCast(u32, cb.bytes.items.len + decoded_size_upper_bound); + const end_reserved = @as(u32, @intCast(cb.bytes.items.len + decoded_size_upper_bound)); const buffer = cb.bytes.allocatedSlice()[end_reserved..]; const end_index = try file.readAll(buffer); 
const encoded_bytes = buffer[0..end_index]; @@ -230,7 +230,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom return error.MissingEndCertificateMarker; start_index = cert_end + end_marker.len; const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n"); - const decoded_start = @intCast(u32, cb.bytes.items.len); + const decoded_start = @as(u32, @intCast(cb.bytes.items.len)); const dest_buf = cb.bytes.allocatedSlice()[decoded_start..]; cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert); try cb.parseCert(gpa, decoded_start, now_sec); diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index bd7100eb4674..028275a06b49 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -21,7 +21,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { const reader = stream.reader(); const db_header = try reader.readStructBig(ApplDbHeader); - assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature))); + assert(mem.eql(u8, "kych", &@as([4]u8, @bitCast(db_header.signature)))); try stream.seekTo(db_header.schema_offset); @@ -42,7 +42,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { const table_header = try reader.readStructBig(TableHeader); - if (@enumFromInt(std.os.darwin.cssm.DB_RECORDTYPE, table_header.table_id) != .X509_CERTIFICATE) { + if (@as(std.os.darwin.cssm.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) { continue; } @@ -61,7 +61,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void { try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size); - const cert_start = @intCast(u32, cb.bytes.items.len); + const cert_start = @as(u32, @intCast(cb.bytes.items.len)); const dest_buf = cb.bytes.allocatedSlice()[cert_start..]; cb.bytes.items.len += try reader.readAtLeast(dest_buf, cert_header.cert_size); diff --git 
a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig index 9709a3a958d5..75633f7c694f 100644 --- a/lib/std/crypto/aegis.zig +++ b/lib/std/crypto/aegis.zig @@ -625,7 +625,7 @@ test "Aegis MAC" { const key = [_]u8{0x00} ** Aegis128LMac.key_length; var msg: [64]u8 = undefined; for (&msg, 0..) |*m, i| { - m.* = @truncate(u8, i); + m.* = @as(u8, @truncate(i)); } const st_init = Aegis128LMac.init(&key); var st = st_init; diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig index 4c2a8ff80de0..0b15555ad087 100644 --- a/lib/std/crypto/aes/soft.zig +++ b/lib/std/crypto/aes/soft.zig @@ -51,13 +51,13 @@ pub const Block = struct { const s3 = block.repr[3]; var x: [4]u32 = undefined; - x = table_lookup(&table_encrypt, @truncate(u8, s0), @truncate(u8, s1 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 24))); var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s1), @truncate(u8, s2 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 24))); var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s2), @truncate(u8, s3 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 24))); var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_encrypt, @truncate(u8, s3), @truncate(u8, s0 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 24)); + x = table_lookup(&table_encrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 24))); var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; t0 ^= round_key.repr[0]; @@ -77,31 +77,31 @@ 
pub const Block = struct { var x: [4]u32 = undefined; x = .{ - table_encrypt[0][@truncate(u8, s0)], - table_encrypt[1][@truncate(u8, s1 >> 8)], - table_encrypt[2][@truncate(u8, s2 >> 16)], - table_encrypt[3][@truncate(u8, s3 >> 24)], + table_encrypt[0][@as(u8, @truncate(s0))], + table_encrypt[1][@as(u8, @truncate(s1 >> 8))], + table_encrypt[2][@as(u8, @truncate(s2 >> 16))], + table_encrypt[3][@as(u8, @truncate(s3 >> 24))], }; var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s1)], - table_encrypt[1][@truncate(u8, s2 >> 8)], - table_encrypt[2][@truncate(u8, s3 >> 16)], - table_encrypt[3][@truncate(u8, s0 >> 24)], + table_encrypt[0][@as(u8, @truncate(s1))], + table_encrypt[1][@as(u8, @truncate(s2 >> 8))], + table_encrypt[2][@as(u8, @truncate(s3 >> 16))], + table_encrypt[3][@as(u8, @truncate(s0 >> 24))], }; var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s2)], - table_encrypt[1][@truncate(u8, s3 >> 8)], - table_encrypt[2][@truncate(u8, s0 >> 16)], - table_encrypt[3][@truncate(u8, s1 >> 24)], + table_encrypt[0][@as(u8, @truncate(s2))], + table_encrypt[1][@as(u8, @truncate(s3 >> 8))], + table_encrypt[2][@as(u8, @truncate(s0 >> 16))], + table_encrypt[3][@as(u8, @truncate(s1 >> 24))], }; var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_encrypt[0][@truncate(u8, s3)], - table_encrypt[1][@truncate(u8, s0 >> 8)], - table_encrypt[2][@truncate(u8, s1 >> 16)], - table_encrypt[3][@truncate(u8, s2 >> 24)], + table_encrypt[0][@as(u8, @truncate(s3))], + table_encrypt[1][@as(u8, @truncate(s0 >> 8))], + table_encrypt[2][@as(u8, @truncate(s1 >> 16))], + table_encrypt[3][@as(u8, @truncate(s2 >> 24))], }; var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; @@ -122,13 +122,13 @@ pub const Block = struct { // Last round uses s-box directly and XORs to produce output. 
var x: [4]u8 = undefined; - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s3 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s0)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0))); var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s0 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s1)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1))); var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s1 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s2)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2))); var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_encrypt, @truncate(u8, s2 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s3)); + x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3))); var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); t0 ^= round_key.repr[0]; @@ -147,13 +147,13 @@ pub const Block = struct { const s3 = block.repr[3]; var x: [4]u32 = undefined; - x = table_lookup(&table_decrypt, @truncate(u8, s0), @truncate(u8, s3 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 24))); var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s1), 
@truncate(u8, s0 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 24))); var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s2), @truncate(u8, s1 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 24))); var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; - x = table_lookup(&table_decrypt, @truncate(u8, s3), @truncate(u8, s2 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 24)); + x = table_lookup(&table_decrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 24))); var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; t0 ^= round_key.repr[0]; @@ -173,31 +173,31 @@ pub const Block = struct { var x: [4]u32 = undefined; x = .{ - table_decrypt[0][@truncate(u8, s0)], - table_decrypt[1][@truncate(u8, s3 >> 8)], - table_decrypt[2][@truncate(u8, s2 >> 16)], - table_decrypt[3][@truncate(u8, s1 >> 24)], + table_decrypt[0][@as(u8, @truncate(s0))], + table_decrypt[1][@as(u8, @truncate(s3 >> 8))], + table_decrypt[2][@as(u8, @truncate(s2 >> 16))], + table_decrypt[3][@as(u8, @truncate(s1 >> 24))], }; var t0 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s1)], - table_decrypt[1][@truncate(u8, s0 >> 8)], - table_decrypt[2][@truncate(u8, s3 >> 16)], - table_decrypt[3][@truncate(u8, s2 >> 24)], + table_decrypt[0][@as(u8, @truncate(s1))], + table_decrypt[1][@as(u8, @truncate(s0 >> 8))], + table_decrypt[2][@as(u8, @truncate(s3 >> 16))], + table_decrypt[3][@as(u8, @truncate(s2 >> 24))], }; var t1 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s2)], - table_decrypt[1][@truncate(u8, s1 >> 8)], - table_decrypt[2][@truncate(u8, s0 >> 16)], - table_decrypt[3][@truncate(u8, s3 >> 24)], + 
table_decrypt[0][@as(u8, @truncate(s2))], + table_decrypt[1][@as(u8, @truncate(s1 >> 8))], + table_decrypt[2][@as(u8, @truncate(s0 >> 16))], + table_decrypt[3][@as(u8, @truncate(s3 >> 24))], }; var t2 = x[0] ^ x[1] ^ x[2] ^ x[3]; x = .{ - table_decrypt[0][@truncate(u8, s3)], - table_decrypt[1][@truncate(u8, s2 >> 8)], - table_decrypt[2][@truncate(u8, s1 >> 16)], - table_decrypt[3][@truncate(u8, s0 >> 24)], + table_decrypt[0][@as(u8, @truncate(s3))], + table_decrypt[1][@as(u8, @truncate(s2 >> 8))], + table_decrypt[2][@as(u8, @truncate(s1 >> 16))], + table_decrypt[3][@as(u8, @truncate(s0 >> 24))], }; var t3 = x[0] ^ x[1] ^ x[2] ^ x[3]; @@ -218,13 +218,13 @@ pub const Block = struct { // Last round uses s-box directly and XORs to produce output. var x: [4]u8 = undefined; - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s1 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s0)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0))); var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s2 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s1)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1))); var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s3 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s2)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2))); var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); - x = sbox_lookup(&sbox_decrypt, @truncate(u8, s0 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 8), 
@truncate(u8, s3)); + x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3))); var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]); t0 ^= round_key.repr[0]; @@ -348,7 +348,7 @@ fn KeySchedule(comptime Aes: type) type { const subw = struct { // Apply sbox_encrypt to each byte in w. fn func(w: u32) u32 { - const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, w), @truncate(u8, w >> 8), @truncate(u8, w >> 16), @truncate(u8, w >> 24)); + const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(w)), @as(u8, @truncate(w >> 8)), @as(u8, @truncate(w >> 16)), @as(u8, @truncate(w >> 24))); return @as(u32, x[3]) << 24 | @as(u32, x[2]) << 16 | @as(u32, x[1]) << 8 | @as(u32, x[0]); } }.func; @@ -386,7 +386,7 @@ fn KeySchedule(comptime Aes: type) type { inline while (j < 4) : (j += 1) { var rk = round_keys[(ei + j) / 4].repr[(ei + j) % 4]; if (i > 0 and i + 4 < total_words) { - const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, rk >> 24), @truncate(u8, rk >> 16), @truncate(u8, rk >> 8), @truncate(u8, rk)); + const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(rk >> 24)), @as(u8, @truncate(rk >> 16)), @as(u8, @truncate(rk >> 8)), @as(u8, @truncate(rk))); const y = table_lookup(&table_decrypt, x[3], x[2], x[1], x[0]); rk = y[0] ^ y[1] ^ y[2] ^ y[3]; } @@ -664,7 +664,7 @@ fn mul(a: u8, b: u8) u8 { } } - return @truncate(u8, s); + return @as(u8, @truncate(s)); } const cache_line_bytes = 64; diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig index 6d5ce3779a96..a05e8a7248e1 100644 --- a/lib/std/crypto/aes_ocb.zig +++ b/lib/std/crypto/aes_ocb.zig @@ -86,18 +86,18 @@ fn AesOcb(comptime Aes: anytype) type { fn getOffset(aes_enc_ctx: EncryptCtx, npub: [nonce_length]u8) Block { var nx = [_]u8{0} ** 16; - nx[0] = @intCast(u8, @truncate(u7, tag_length * 8) << 1); + nx[0] = @as(u8, @intCast(@as(u7, @truncate(tag_length * 8)) << 
1)); nx[16 - nonce_length - 1] = 1; nx[nx.len - nonce_length ..].* = npub; - const bottom = @truncate(u6, nx[15]); + const bottom = @as(u6, @truncate(nx[15])); nx[15] &= 0xc0; var ktop_: Block = undefined; aes_enc_ctx.encrypt(&ktop_, &nx); const ktop = mem.readIntBig(u128, &ktop_); - var stretch = (@as(u192, ktop) << 64) | @as(u192, @truncate(u64, ktop >> 64) ^ @truncate(u64, ktop >> 56)); + var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56))); var offset: Block = undefined; - mem.writeIntBig(u128, &offset, @truncate(u128, stretch >> (64 - @as(u7, bottom)))); + mem.writeIntBig(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom))))); return offset; } diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig index 40df3290c0a5..898bc24e6fa4 100644 --- a/lib/std/crypto/argon2.zig +++ b/lib/std/crypto/argon2.zig @@ -95,7 +95,7 @@ pub const Params = struct { pub fn fromLimits(ops_limit: u32, mem_limit: usize) Self { const m = mem_limit / 1024; std.debug.assert(m <= max_int); - return .{ .t = ops_limit, .m = @intCast(u32, m), .p = 1 }; + return .{ .t = ops_limit, .m = @as(u32, @intCast(m)), .p = 1 }; } }; @@ -111,26 +111,26 @@ fn initHash( var tmp: [4]u8 = undefined; var b2 = Blake2b512.init(.{}); mem.writeIntLittle(u32, parameters[0..4], params.p); - mem.writeIntLittle(u32, parameters[4..8], @intCast(u32, dk_len)); + mem.writeIntLittle(u32, parameters[4..8], @as(u32, @intCast(dk_len))); mem.writeIntLittle(u32, parameters[8..12], params.m); mem.writeIntLittle(u32, parameters[12..16], params.t); mem.writeIntLittle(u32, parameters[16..20], version); mem.writeIntLittle(u32, parameters[20..24], @intFromEnum(mode)); b2.update(¶meters); - mem.writeIntLittle(u32, &tmp, @intCast(u32, password.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(password.len))); b2.update(&tmp); b2.update(password); - mem.writeIntLittle(u32, &tmp, @intCast(u32, salt.len)); + mem.writeIntLittle(u32, &tmp, 
@as(u32, @intCast(salt.len))); b2.update(&tmp); b2.update(salt); const secret = params.secret orelse ""; std.debug.assert(secret.len <= max_int); - mem.writeIntLittle(u32, &tmp, @intCast(u32, secret.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(secret.len))); b2.update(&tmp); b2.update(secret); const ad = params.ad orelse ""; std.debug.assert(ad.len <= max_int); - mem.writeIntLittle(u32, &tmp, @intCast(u32, ad.len)); + mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(ad.len))); b2.update(&tmp); b2.update(ad); b2.final(h0[0..Blake2b512.digest_length]); @@ -140,7 +140,7 @@ fn initHash( fn blake2bLong(out: []u8, in: []const u8) void { const H = Blake2b512; var outlen_bytes: [4]u8 = undefined; - mem.writeIntLittle(u32, &outlen_bytes, @intCast(u32, out.len)); + mem.writeIntLittle(u32, &outlen_bytes, @as(u32, @intCast(out.len))); var out_buf: [H.digest_length]u8 = undefined; @@ -391,7 +391,7 @@ fn Rp(a: usize, b: usize, c: usize, d: usize) QuarterRound { } fn fBlaMka(x: u64, y: u64) u64 { - const xy = @as(u64, @truncate(u32, x)) * @as(u64, @truncate(u32, y)); + const xy = @as(u64, @as(u32, @truncate(x))) * @as(u64, @as(u32, @truncate(y))); return x +% y +% 2 *% xy; } @@ -448,7 +448,7 @@ fn indexAlpha( lane: u24, index: u32, ) u32 { - var ref_lane = @intCast(u32, rand >> 32) % threads; + var ref_lane = @as(u32, @intCast(rand >> 32)) % threads; if (n == 0 and slice == 0) { ref_lane = lane; } @@ -467,10 +467,10 @@ fn indexAlpha( if (index == 0 or lane == ref_lane) { m -= 1; } - var p = @as(u64, @truncate(u32, rand)); + var p = @as(u64, @as(u32, @truncate(rand))); p = (p * p) >> 32; p = (p * m) >> 32; - return ref_lane * lanes + @intCast(u32, ((s + m - (p + 1)) % lanes)); + return ref_lane * lanes + @as(u32, @intCast(((s + m - (p + 1)) % lanes))); } /// Derives a key from the password, salt, and argon2 parameters. 
diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig index ae4bb57d293a..8aa0b109f21e 100644 --- a/lib/std/crypto/ascon.zig +++ b/lib/std/crypto/ascon.zig @@ -95,8 +95,8 @@ pub fn State(comptime endian: builtin.Endian) type { /// XOR a byte into the state at a given offset. pub fn addByte(self: *Self, byte: u8, offset: usize) void { const z = switch (endian) { - .Big => 64 - 8 - 8 * @truncate(u6, offset % 8), - .Little => 8 * @truncate(u6, offset % 8), + .Big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)), + .Little => 8 * @as(u6, @truncate(offset % 8)), }; self.st[offset / 8] ^= @as(u64, byte) << z; } diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig index 7bd140d584e7..87d2eef79aea 100644 --- a/lib/std/crypto/bcrypt.zig +++ b/lib/std/crypto/bcrypt.zig @@ -376,10 +376,10 @@ pub const State = struct { const Halves = struct { l: u32, r: u32 }; fn halfRound(state: *const State, i: u32, j: u32, n: usize) u32 { - var r = state.sboxes[0][@truncate(u8, j >> 24)]; - r +%= state.sboxes[1][@truncate(u8, j >> 16)]; - r ^= state.sboxes[2][@truncate(u8, j >> 8)]; - r +%= state.sboxes[3][@truncate(u8, j)]; + var r = state.sboxes[0][@as(u8, @truncate(j >> 24))]; + r +%= state.sboxes[1][@as(u8, @truncate(j >> 16))]; + r ^= state.sboxes[2][@as(u8, @truncate(j >> 8))]; + r +%= state.sboxes[3][@as(u8, @truncate(j))]; return i ^ r ^ state.subkeys[n]; } diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index f47c334ee91f..17f11382ca5f 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -54,8 +54,8 @@ pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(bytes / elapsed_s)); return throughput; } @@ -95,8 +95,8 @@ 
pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 { } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(bytes / elapsed_s)); return throughput; } @@ -125,8 +125,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, exchange_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(exchange_count / elapsed_s)); return throughput; } @@ -148,8 +148,8 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, signatures_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -172,8 +172,8 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, signatures_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -201,8 +201,8 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = batch.len * @intFromFloat(u64, signatures_count / elapsed_s); + 
const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = batch.len * @as(u64, @intFromFloat(signatures_count / elapsed_s)); return throughput; } @@ -227,8 +227,8 @@ pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -249,8 +249,8 @@ pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_i } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -267,8 +267,8 @@ pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_i } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, kems_count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s)); return throughput; } @@ -309,8 +309,8 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64 mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, 2 * bytes / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(2 * bytes / elapsed_s)); return throughput; } @@ -338,8 +338,8 @@ pub fn benchmarkAes(comptime Aes: anytype, comptime count: 
comptime_int) !u64 { mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(count / elapsed_s)); return throughput; } @@ -367,8 +367,8 @@ pub fn benchmarkAes8(comptime Aes: anytype, comptime count: comptime_int) !u64 { mem.doNotOptimizeAway(&in); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, 8 * count / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(8 * count / elapsed_s)); return throughput; } @@ -406,7 +406,7 @@ fn benchmarkPwhash( const password = "testpass" ** 2; const opts = .{ .allocator = allocator, - .params = @ptrCast(*const ty.Params, @alignCast(std.meta.alignment(ty.Params), params)).*, + .params = @as(*const ty.Params, @ptrCast(@alignCast(params))).*, .encoding = .phc, }; var buf: [256]u8 = undefined; @@ -422,7 +422,7 @@ fn benchmarkPwhash( } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; const throughput = elapsed_s / count; return throughput; diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig index 316ea5e6b77d..ba07226d080b 100644 --- a/lib/std/crypto/blake2.zig +++ b/lib/std/crypto/blake2.zig @@ -80,7 +80,7 @@ pub fn Blake2s(comptime out_bits: usize) type { const key_len = if (options.key) |key| key.len else 0; // default parameters - d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, options.expected_out_bits >> 3); + d.h[0] ^= 0x01010000 ^ @as(u32, @truncate(key_len << 8)) ^ @as(u32, @intCast(options.expected_out_bits >> 3)); d.t = 0; d.buf_len = 0; @@ -127,7 +127,7 @@ pub fn 
Blake2s(comptime out_bits: usize) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn final(d: *Self, out: *[digest_length]u8) void { @@ -135,7 +135,7 @@ pub fn Blake2s(comptime out_bits: usize) type { d.t += d.buf_len; d.round(d.buf[0..], true); for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*); - out.* = @ptrCast(*[digest_length]u8, &d.h).*; + out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*; } fn round(d: *Self, b: *const [64]u8, last: bool) void { @@ -152,8 +152,8 @@ pub fn Blake2s(comptime out_bits: usize) type { v[k + 8] = iv[k]; } - v[12] ^= @truncate(u32, d.t); - v[13] ^= @intCast(u32, d.t >> 32); + v[12] ^= @as(u32, @truncate(d.t)); + v[13] ^= @as(u32, @intCast(d.t >> 32)); if (last) v[14] = ~v[14]; const rounds = comptime [_]RoundParam{ @@ -563,7 +563,7 @@ pub fn Blake2b(comptime out_bits: usize) type { // Copy any remainder for next pass. 
const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn final(d: *Self, out: *[digest_length]u8) void { @@ -571,7 +571,7 @@ pub fn Blake2b(comptime out_bits: usize) type { d.t += d.buf_len; d.round(d.buf[0..], true); for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*); - out.* = @ptrCast(*[digest_length]u8, &d.h).*; + out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*; } fn round(d: *Self, b: *const [128]u8, last: bool) void { @@ -588,8 +588,8 @@ pub fn Blake2b(comptime out_bits: usize) type { v[k + 8] = iv[k]; } - v[12] ^= @truncate(u64, d.t); - v[13] ^= @intCast(u64, d.t >> 64); + v[12] ^= @as(u64, @truncate(d.t)); + v[13] ^= @as(u64, @intCast(d.t >> 64)); if (last) v[14] = ~v[14]; const rounds = comptime [_]RoundParam{ diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index 7ad1511e798f..fc1859b99d7a 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -89,7 +89,7 @@ const CompressVectorized = struct { counter: u64, flags: u8, ) [16]u32 { - const md = Lane{ @truncate(u32, counter), @truncate(u32, counter >> 32), block_len, @as(u32, flags) }; + const md = Lane{ @as(u32, @truncate(counter)), @as(u32, @truncate(counter >> 32)), block_len, @as(u32, flags) }; var rows = Rows{ chaining_value[0..4].*, chaining_value[4..8].*, IV[0..4].*, md }; var m = Rows{ block_words[0..4].*, block_words[4..8].*, block_words[8..12].*, block_words[12..16].* }; @@ -134,7 +134,7 @@ const CompressVectorized = struct { rows[2] ^= @Vector(4, u32){ chaining_value[0], chaining_value[1], chaining_value[2], chaining_value[3] }; rows[3] ^= @Vector(4, u32){ chaining_value[4], chaining_value[5], chaining_value[6], chaining_value[7] }; - return @bitCast([16]u32, rows); + return @as([16]u32, @bitCast(rows)); } }; @@ -184,8 +184,8 @@ const CompressGeneric = struct { IV[1], IV[2], IV[3], - @truncate(u32, counter), - @truncate(u32, counter >> 32), 
+ @as(u32, @truncate(counter)), + @as(u32, @truncate(counter >> 32)), block_len, flags, }; @@ -206,7 +206,7 @@ else CompressGeneric.compress; fn first8Words(words: [16]u32) [8]u32 { - return @ptrCast(*const [8]u32, &words).*; + return @as(*const [8]u32, @ptrCast(&words)).*; } fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 { @@ -285,7 +285,7 @@ const ChunkState = struct { const want = BLOCK_LEN - self.block_len; const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); - self.block_len += @truncate(u8, take); + self.block_len += @as(u8, @truncate(take)); return input[take..]; } @@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Setup input pattern var input_pattern: [251]u8 = undefined; - for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i); + for (&input_pattern, 0..) |*e, i| e.* = @as(u8, @truncate(i)); // Write repeating input pattern to hasher var input_counter = input_len; diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig index 776387cbd944..6688fb97faa6 100644 --- a/lib/std/crypto/chacha20.zig +++ b/lib/std/crypto/chacha20.zig @@ -587,8 +587,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type { const k = keyToWords(key); var c: [4]u32 = undefined; - c[0] = @truncate(u32, counter); - c[1] = @truncate(u32, counter >> 32); + c[0] = @as(u32, @truncate(counter)); + c[1] = @as(u32, @truncate(counter >> 32)); c[2] = mem.readIntLittle(u32, nonce[0..4]); c[3] = mem.readIntLittle(u32, nonce[4..8]); ChaChaImpl(rounds_nb).chacha20Xor(out, in, k, c, true); @@ -600,8 +600,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type { const k = keyToWords(key); var c: [4]u32 = undefined; - c[0] = @truncate(u32, counter); - c[1] = @truncate(u32, counter >> 32); + c[0] = @as(u32, @truncate(counter)); + c[1] = @as(u32, @truncate(counter >> 32)); c[2] = mem.readIntLittle(u32, nonce[0..4]); c[3] = 
mem.readIntLittle(u32, nonce[4..8]); ChaChaImpl(rounds_nb).chacha20Stream(out, k, c, true); diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig index e552af2e26f0..1a5335b07e0b 100644 --- a/lib/std/crypto/ecdsa.zig +++ b/lib/std/crypto/ecdsa.zig @@ -122,9 +122,9 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type { pub fn toDer(self: Signature, buf: *[der_encoded_max_length]u8) []u8 { var fb = io.fixedBufferStream(buf); const w = fb.writer(); - const r_len = @intCast(u8, self.r.len + (self.r[0] >> 7)); - const s_len = @intCast(u8, self.s.len + (self.s[0] >> 7)); - const seq_len = @intCast(u8, 2 + r_len + 2 + s_len); + const r_len = @as(u8, @intCast(self.r.len + (self.r[0] >> 7))); + const s_len = @as(u8, @intCast(self.s.len + (self.s[0] >> 7))); + const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len)); w.writeAll(&[_]u8{ 0x30, seq_len }) catch unreachable; w.writeAll(&[_]u8{ 0x02, r_len }) catch unreachable; if (self.r[0] >> 7 != 0) { diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 7b298c71c2ee..0a99058b2187 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -100,7 +100,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { var x = x_; var out = Self.zero; for (0..out.limbs.capacity()) |i| { - const t = if (@bitSizeOf(T) > t_bits) @truncate(TLimb, x) else x; + const t = if (@bitSizeOf(T) > t_bits) @as(TLimb, @truncate(x)) else x; out.limbs.set(i, t); x = math.shr(T, x, t_bits); } @@ -143,9 +143,9 @@ pub fn Uint(comptime max_bits: comptime_int) type { var remaining_bits = t_bits; var limb = self.limbs.get(i); while (remaining_bits >= 8) { - bytes[out_i] |= math.shl(u8, @truncate(u8, limb), shift); + bytes[out_i] |= math.shl(u8, @as(u8, @truncate(limb)), shift); const consumed = 8 - shift; - limb >>= @truncate(u4, consumed); + limb >>= @as(u4, @truncate(consumed)); remaining_bits -= consumed; shift = 0; switch (endian) { @@ -169,7 +169,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { }, } } - 
bytes[out_i] |= @truncate(u8, limb); + bytes[out_i] |= @as(u8, @truncate(limb)); shift = remaining_bits; } } @@ -190,7 +190,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { shift += 8; if (shift >= t_bits) { shift -= t_bits; - out.limbs.set(out_i, @truncate(TLimb, out.limbs.get(out_i))); + out.limbs.set(out_i, @as(TLimb, @truncate(out.limbs.get(out_i)))); const overflow = math.shr(Limb, bi, 8 - shift); out_i += 1; if (out_i >= out.limbs.len) { @@ -242,7 +242,7 @@ pub fn Uint(comptime max_bits: comptime_int) type { /// Returns `true` if the integer is odd. pub fn isOdd(x: Self) bool { - return @bitCast(bool, @truncate(u1, x.limbs.get(0))); + return @as(bool, @bitCast(@as(u1, @truncate(x.limbs.get(0))))); } /// Adds `y` to `x`, and returns `true` if the operation overflowed. @@ -273,8 +273,8 @@ pub fn Uint(comptime max_bits: comptime_int) type { var carry: u1 = 0; for (0..x.limbs_count()) |i| { const res = x_limbs[i] + y_limbs[i] + carry; - x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]); - carry = @truncate(u1, res >> t_bits); + x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]); + carry = @as(u1, @truncate(res >> t_bits)); } return carry; } @@ -288,8 +288,8 @@ pub fn Uint(comptime max_bits: comptime_int) type { var borrow: u1 = 0; for (0..x.limbs_count()) |i| { const res = x_limbs[i] -% y_limbs[i] -% borrow; - x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]); - borrow = @truncate(u1, res >> t_bits); + x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]); + borrow = @as(u1, @truncate(res >> t_bits)); } return borrow; } @@ -432,7 +432,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { inline for (0..comptime math.log2_int(usize, t_bits)) |_| { y = y *% (2 -% lo *% y); } - const m0inv = (@as(Limb, 1) << t_bits) - (@truncate(TLimb, y)); + const m0inv = (@as(Limb, 1) << t_bits) - (@as(TLimb, @truncate(y))); const zero = Fe{ .v = FeUint.zero }; @@ -508,18 +508,18 @@ pub fn Modulus(comptime 
max_bits: comptime_int) type { var need_sub = false; var i: usize = t_bits - 1; while (true) : (i -= 1) { - var carry = @truncate(u1, math.shr(Limb, y, i)); + var carry = @as(u1, @truncate(math.shr(Limb, y, i))); var borrow: u1 = 0; for (0..self.limbs_count()) |j| { const l = ct.select(need_sub, d_limbs[j], x_limbs[j]); var res = (l << 1) + carry; - x_limbs[j] = @truncate(TLimb, res); - carry = @truncate(u1, res >> t_bits); + x_limbs[j] = @as(TLimb, @truncate(res)); + carry = @as(u1, @truncate(res >> t_bits)); res = x_limbs[j] -% m_limbs[j] -% borrow; - d_limbs[j] = @truncate(TLimb, res); + d_limbs[j] = @as(TLimb, @truncate(res)); - borrow = @truncate(u1, res >> t_bits); + borrow = @as(u1, @truncate(res >> t_bits)); } need_sub = ct.eql(carry, borrow); if (i == 0) break; @@ -531,7 +531,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { pub fn add(self: Self, x: Fe, y: Fe) Fe { var out = x; const overflow = out.v.addWithOverflow(y.v); - const underflow = @bitCast(u1, ct.limbsCmpLt(out.v, self.v)); + const underflow = @as(u1, @bitCast(ct.limbsCmpLt(out.v, self.v))); const need_sub = ct.eql(overflow, underflow); _ = out.v.conditionalSubWithOverflow(need_sub, self.v); return out; @@ -540,7 +540,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { /// Subtracts two field elements (mod m). 
pub fn sub(self: Self, x: Fe, y: Fe) Fe { var out = x; - const underflow = @bitCast(bool, out.v.subWithOverflow(y.v)); + const underflow = @as(bool, @bitCast(out.v.subWithOverflow(y.v))); _ = out.v.conditionalAddWithOverflow(underflow, self.v); return out; } @@ -601,7 +601,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var wide = ct.mulWide(a_limbs[i], b_limbs[0]); var z_lo = @addWithOverflow(d_limbs[0], wide.lo); - const f = @truncate(TLimb, z_lo[0] *% self.m0inv); + const f = @as(TLimb, @truncate(z_lo[0] *% self.m0inv)); var z_hi = wide.hi +% z_lo[1]; wide = ct.mulWide(f, m_limbs[0]); z_lo = @addWithOverflow(z_lo[0], wide.lo); @@ -620,13 +620,13 @@ pub fn Modulus(comptime max_bits: comptime_int) type { z_lo = @addWithOverflow(z_lo[0], carry); z_hi +%= z_lo[1]; if (j > 0) { - d_limbs[j - 1] = @truncate(TLimb, z_lo[0]); + d_limbs[j - 1] = @as(TLimb, @truncate(z_lo[0])); } carry = (z_hi << 1) | (z_lo[0] >> t_bits); } const z = overflow + carry; - d_limbs[self.limbs_count() - 1] = @truncate(TLimb, z); - overflow = @truncate(u1, z >> t_bits); + d_limbs[self.limbs_count() - 1] = @as(TLimb, @truncate(z)); + overflow = @as(u1, @truncate(z >> t_bits)); } return overflow; } @@ -735,7 +735,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { t0 = pc[k - 1]; } else { for (pc, 0..) |t, i| { - t0.v.cmov(ct.eql(k, @truncate(u8, i + 1)), t.v); + t0.v.cmov(ct.eql(k, @as(u8, @truncate(i + 1))), t.v); } } const t1 = self.montgomeryMul(out, t0); @@ -771,7 +771,7 @@ const ct_protected = struct { fn eql(x: anytype, y: @TypeOf(x)) bool { const c1 = @subWithOverflow(x, y)[1]; const c2 = @subWithOverflow(y, x)[1]; - return @bitCast(bool, 1 - (c1 | c2)); + return @as(bool, @bitCast(1 - (c1 | c2))); } // Compares two big integers in constant time, returning true if x < y. 
@@ -782,28 +782,28 @@ const ct_protected = struct { var c: u1 = 0; for (0..x.limbs_count()) |i| { - c = @truncate(u1, (x_limbs[i] -% y_limbs[i] -% c) >> t_bits); + c = @as(u1, @truncate((x_limbs[i] -% y_limbs[i] -% c) >> t_bits)); } - return @bitCast(bool, c); + return @as(bool, @bitCast(c)); } // Compares two big integers in constant time, returning true if x >= y. fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool { - return @bitCast(bool, 1 - @intFromBool(ct.limbsCmpLt(x, y))); + return @as(bool, @bitCast(1 - @intFromBool(ct.limbsCmpLt(x, y)))); } // Multiplies two limbs and returns the result as a wide limb. fn mulWide(x: Limb, y: Limb) WideLimb { const half_bits = @typeInfo(Limb).Int.bits / 2; const Half = meta.Int(.unsigned, half_bits); - const x0 = @truncate(Half, x); - const x1 = @truncate(Half, x >> half_bits); - const y0 = @truncate(Half, y); - const y1 = @truncate(Half, y >> half_bits); + const x0 = @as(Half, @truncate(x)); + const x1 = @as(Half, @truncate(x >> half_bits)); + const y0 = @as(Half, @truncate(y)); + const y1 = @as(Half, @truncate(y >> half_bits)); const w0 = math.mulWide(Half, x0, y0); const t = math.mulWide(Half, x1, y0) + (w0 >> half_bits); - var w1: Limb = @truncate(Half, t); - const w2 = @truncate(Half, t >> half_bits); + var w1: Limb = @as(Half, @truncate(t)); + const w2 = @as(Half, @truncate(t >> half_bits)); w1 += math.mulWide(Half, x0, y1); const hi = math.mulWide(Half, x1, y1) + w2 + (w1 >> half_bits); const lo = x *% y; @@ -847,8 +847,8 @@ const ct_unprotected = struct { fn mulWide(x: Limb, y: Limb) WideLimb { const wide = math.mulWide(Limb, x, y); return .{ - .hi = @truncate(Limb, wide >> @typeInfo(Limb).Int.bits), - .lo = @truncate(Limb, wide), + .hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).Int.bits)), + .lo = @as(Limb, @truncate(wide)), }; } }; diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 2fbff25f72d4..11379cc8e3ea 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ 
b/lib/std/crypto/ghash_polyval.zig @@ -96,28 +96,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const product = asm ( \\ vpclmulqdq $0x11, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .lo => { const product = asm ( \\ vpclmulqdq $0x00, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .hi_lo => { const product = asm ( \\ vpclmulqdq $0x10, %[x], %[y], %[out] : [out] "=x" (-> @Vector(2, u64)), - : [x] "x" (@bitCast(@Vector(2, u64), x)), - [y] "x" (@bitCast(@Vector(2, u64), y)), + : [x] "x" (@as(@Vector(2, u64), @bitCast(x))), + [y] "x" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, } } @@ -129,28 +129,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const product = asm ( \\ pmull2 %[out].1q, %[x].2d, %[y].2d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), x)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, .lo => { const product = asm ( \\ pmull %[out].1q, %[x].1d, %[y].1d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), x)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, 
product); + return @as(u128, @bitCast(product)); }, .hi_lo => { const product = asm ( \\ pmull %[out].1q, %[x].1d, %[y].1d : [out] "=w" (-> @Vector(2, u64)), - : [x] "w" (@bitCast(@Vector(2, u64), x >> 64)), - [y] "w" (@bitCast(@Vector(2, u64), y)), + : [x] "w" (@as(@Vector(2, u64), @bitCast(x >> 64))), + [y] "w" (@as(@Vector(2, u64), @bitCast(y))), ); - return @bitCast(u128, product); + return @as(u128, @bitCast(product)); }, } } @@ -167,8 +167,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Software carryless multiplication of two 64-bit integers using native 128-bit registers. fn clmulSoft128(x_: u128, y_: u128, comptime half: Selector) u128 { - const x = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_); - const y = @truncate(u64, if (half == .hi) y_ >> 64 else y_); + const x = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_)); + const y = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_)); const x0 = x & 0x1111111111111110; const x1 = x & 0x2222222222222220; @@ -216,12 +216,12 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Software carryless multiplication of two 128-bit integers using 64-bit registers. 
fn clmulSoft128_64(x_: u128, y_: u128, comptime half: Selector) u128 { - const a = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_); - const b = @truncate(u64, if (half == .hi) y_ >> 64 else y_); - const a0 = @truncate(u32, a); - const a1 = @truncate(u32, a >> 32); - const b0 = @truncate(u32, b); - const b1 = @truncate(u32, b >> 32); + const a = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_)); + const b = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_)); + const a0 = @as(u32, @truncate(a)); + const a1 = @as(u32, @truncate(a >> 32)); + const b0 = @as(u32, @truncate(b)); + const b1 = @as(u32, @truncate(b >> 32)); const lo = clmulSoft32(a0, b0); const hi = clmulSoft32(a1, b1); const mid = clmulSoft32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi; @@ -256,8 +256,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Multiply two 128-bit integers in GF(2^128). inline fn clmul128(x: u128, y: u128) I256 { if (mul_algorithm == .karatsuba) { - const x_hi = @truncate(u64, x >> 64); - const y_hi = @truncate(u64, y >> 64); + const x_hi = @as(u64, @truncate(x >> 64)); + const y_hi = @as(u64, @truncate(y >> 64)); const r_lo = clmul(x, y, .lo); const r_hi = clmul(x, y, .hi); const r_mid = clmul(x ^ x_hi, y ^ y_hi, .lo) ^ r_lo ^ r_hi; @@ -407,7 +407,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { st.pad(); mem.writeInt(u128, out[0..16], st.acc, endian); - utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Self)]); + utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Self)]); } /// Compute the GHASH of a message. 
@@ -442,7 +442,7 @@ test "ghash2" { var key: [16]u8 = undefined; var i: usize = 0; while (i < key.len) : (i += 1) { - key[i] = @intCast(u8, i * 15 + 1); + key[i] = @as(u8, @intCast(i * 15 + 1)); } const tvs = [_]struct { len: usize, hash: [:0]const u8 }{ .{ .len = 5263, .hash = "b9395f37c131cd403a327ccf82ec016a" }, @@ -461,7 +461,7 @@ test "ghash2" { var m: [tv.len]u8 = undefined; i = 0; while (i < m.len) : (i += 1) { - m[i] = @truncate(u8, i % 254 + 1); + m[i] = @as(u8, @truncate(i % 254 + 1)); } var st = Ghash.init(&key); st.update(&m); diff --git a/lib/std/crypto/isap.zig b/lib/std/crypto/isap.zig index 5b0da739dead..1d17e32be8a6 100644 --- a/lib/std/crypto/isap.zig +++ b/lib/std/crypto/isap.zig @@ -67,7 +67,7 @@ pub const IsapA128A = struct { var i: usize = 0; while (i < y.len * 8 - 1) : (i += 1) { const cur_byte_pos = i / 8; - const cur_bit_pos = @truncate(u3, 7 - (i % 8)); + const cur_bit_pos = @as(u3, @truncate(7 - (i % 8))); const cur_bit = ((y[cur_byte_pos] >> cur_bit_pos) & 1) << 7; isap.st.addByte(cur_bit, 0); isap.st.permuteR(1); diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index ddc9b1b84782..d8130bc87a13 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -33,7 +33,7 @@ pub fn KeccakF(comptime f: u11) type { 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008, }; var rc: [max_rounds]T = undefined; - for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @truncate(T, c); + for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @as(T, @truncate(c)); break :rc rc; }; @@ -75,7 +75,7 @@ pub fn KeccakF(comptime f: u11) type { /// XOR a byte into the state at a given offset. 
pub fn addByte(self: *Self, byte: u8, offset: usize) void { - const z = @sizeOf(T) * @truncate(math.Log2Int(T), offset % @sizeOf(T)); + const z = @sizeOf(T) * @as(math.Log2Int(T), @truncate(offset % @sizeOf(T))); self.st[offset / @sizeOf(T)] ^= @as(T, byte) << z; } diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig index 3cb0f02c0d0a..390ff8e7f231 100644 --- a/lib/std/crypto/kyber_d00.zig +++ b/lib/std/crypto/kyber_d00.zig @@ -579,7 +579,7 @@ test "invNTTReductions bounds" { if (j < 0) { break; } - xs[@intCast(usize, j)] = 1; + xs[@as(usize, @intCast(j))] = 1; } } } @@ -615,7 +615,7 @@ fn invertMod(a: anytype, p: @TypeOf(a)) @TypeOf(a) { // Reduce mod q for testing. fn modQ32(x: i32) i16 { - var y = @intCast(i16, @rem(x, @as(i32, Q))); + var y = @as(i16, @intCast(@rem(x, @as(i32, Q)))); if (y < 0) { y += Q; } @@ -638,7 +638,7 @@ fn montReduce(x: i32) i16 { // Note that x q' might be as big as 2³² and could overflow the int32 // multiplication in the last line. However for any int32s a and b, // we have int32(int64(a)*int64(b)) = int32(a*b) and so the result is ok. - const m = @truncate(i16, @truncate(i32, x *% qInv)); + const m = @as(i16, @truncate(@as(i32, @truncate(x *% qInv)))); // Note that x - m q is divisible by R; indeed modulo R we have // @@ -652,7 +652,7 @@ fn montReduce(x: i32) i16 { // and as both 2¹⁵ q ≤ m q, x < 2¹⁵ q, we have // 2¹⁶ q ≤ x - m q < 2¹⁶ and so q ≤ (x - m q) / R < q as desired. 
const yR = x - @as(i32, m) * @as(i32, Q); - return @bitCast(i16, @truncate(u16, @bitCast(u32, yR) >> 16)); + return @as(i16, @bitCast(@as(u16, @truncate(@as(u32, @bitCast(yR)) >> 16)))); } test "Test montReduce" { @@ -676,7 +676,7 @@ fn feToMont(x: i16) i16 { test "Test feToMont" { var x: i32 = -(1 << 15); while (x < 1 << 15) : (x += 1) { - const y = feToMont(@intCast(i16, x)); + const y = feToMont(@as(i16, @intCast(x))); try testing.expectEqual(modQ32(@as(i32, y)), modQ32(x * r_mod_q)); } } @@ -703,14 +703,14 @@ fn feBarrettReduce(x: i16) i16 { // To actually compute this, note that // // ⌊x 20156/2²⁶⌋ = (20159 x) >> 26. - return x -% @intCast(i16, (@as(i32, x) * 20159) >> 26) *% Q; + return x -% @as(i16, @intCast((@as(i32, x) * 20159) >> 26)) *% Q; } test "Test Barrett reduction" { var x: i32 = -(1 << 15); while (x < 1 << 15) : (x += 1) { - var y1 = feBarrettReduce(@intCast(i16, x)); - const y2 = @mod(@intCast(i16, x), Q); + var y1 = feBarrettReduce(@as(i16, @intCast(x))); + const y2 = @mod(@as(i16, @intCast(x)), Q); if (x < 0 and @rem(-x, Q) == 0) { y1 -= Q; } @@ -729,9 +729,9 @@ fn csubq(x: i16) i16 { test "Test csubq" { var x: i32 = -29439; while (x < 1 << 15) : (x += 1) { - const y1 = csubq(@intCast(i16, x)); - var y2 = @intCast(i16, x); - if (@intCast(i16, x) >= Q) { + const y1 = csubq(@as(i16, @intCast(x))); + var y2 = @as(i16, @intCast(x)); + if (@as(i16, @intCast(x)) >= Q) { y2 -= Q; } try testing.expectEqual(y1, y2); @@ -762,7 +762,7 @@ fn computeZetas() [128]i16 { @setEvalBranchQuota(10000); var ret: [128]i16 = undefined; for (&ret, 0..) 
|*r, i| { - const t = @intCast(i16, mpow(@as(i32, zeta), @bitReverse(@intCast(u7, i)), Q)); + const t = @as(i16, @intCast(mpow(@as(i32, zeta), @bitReverse(@as(u7, @intCast(i))), Q))); r.* = csubq(feBarrettReduce(feToMont(t))); } return ret; @@ -945,7 +945,7 @@ const Poly = struct { if (i < 0) { break; } - p.cs[@intCast(usize, i)] = feBarrettReduce(p.cs[@intCast(usize, i)]); + p.cs[@as(usize, @intCast(i))] = feBarrettReduce(p.cs[@as(usize, @intCast(i))]); } } @@ -1020,8 +1020,8 @@ const Poly = struct { // = ⌊(2ᵈ/q)x+½⌋ mod⁺ 2ᵈ // = ⌊((x << d) + q/2) / q⌋ mod⁺ 2ᵈ // = DIV((x << d) + q/2, q) & ((1< 0) { const out_shift = comptime 8 - todo; - out[out_off + j] |= @truncate(u8, (in[i] >> in_shift) << out_shift); + out[out_off + j] |= @as(u8, @truncate((in[i] >> in_shift) << out_shift)); const done = comptime @min(@min(d, todo), d - in_shift); todo -= done; @@ -1094,7 +1094,7 @@ const Poly = struct { // = ⌊(qx + 2ᵈ⁻¹)/2ᵈ⌋ // = (qx + (1<<(d-1))) >> d const qx = @as(u32, out) * @as(u32, Q); - ret.cs[out_off + i] = @intCast(i16, (qx + (1 << (d - 1))) >> d); + ret.cs[out_off + i] = @as(i16, @intCast((qx + (1 << (d - 1))) >> d)); } in_off += in_batch_size; @@ -1209,8 +1209,8 @@ const Poly = struct { // Extract each a and b separately and set coefficient in polynomial. 
inline for (0..batch_count) |j| { const mask2 = comptime (1 << eta) - 1; - const a = @intCast(i16, (d >> (comptime (2 * j * eta))) & mask2); - const b = @intCast(i16, (d >> (comptime ((2 * j + 1) * eta))) & mask2); + const a = @as(i16, @intCast((d >> (comptime (2 * j * eta))) & mask2)); + const b = @as(i16, @intCast((d >> (comptime ((2 * j + 1) * eta))) & mask2)); ret.cs[batch_count * i + j] = a - b; } } @@ -1246,7 +1246,7 @@ const Poly = struct { inline for (ts) |t| { if (t < Q) { - ret.cs[i] = @intCast(i16, t); + ret.cs[i] = @as(i16, @intCast(t)); i += 1; if (i == N) { @@ -1266,11 +1266,11 @@ const Poly = struct { fn toBytes(p: Poly) [bytes_length]u8 { var ret: [bytes_length]u8 = undefined; for (0..comptime N / 2) |i| { - const t0 = @intCast(u16, p.cs[2 * i]); - const t1 = @intCast(u16, p.cs[2 * i + 1]); - ret[3 * i] = @truncate(u8, t0); - ret[3 * i + 1] = @truncate(u8, (t0 >> 8) | (t1 << 4)); - ret[3 * i + 2] = @truncate(u8, t1 >> 4); + const t0 = @as(u16, @intCast(p.cs[2 * i])); + const t1 = @as(u16, @intCast(p.cs[2 * i + 1])); + ret[3 * i] = @as(u8, @truncate(t0)); + ret[3 * i + 1] = @as(u8, @truncate((t0 >> 8) | (t1 << 4))); + ret[3 * i + 2] = @as(u8, @truncate(t1 >> 4)); } return ret; } @@ -1356,7 +1356,7 @@ fn Vec(comptime K: u8) type { fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Self { var ret: Self = undefined; for (0..K) |i| { - ret.ps[i] = Poly.noise(eta, nonce + @intCast(u8, i), seed); + ret.ps[i] = Poly.noise(eta, nonce + @as(u8, @intCast(i)), seed); } return ret; } @@ -1534,7 +1534,7 @@ test "Compression" { test "noise" { var seed: [32]u8 = undefined; for (&seed, 0..) |*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } try testing.expectEqual(Poly.noise(3, 37, &seed).cs, .{ 0, 0, 1, -1, 0, 2, 0, -1, -1, 3, 0, 1, -2, -2, 0, 1, -2, @@ -1580,7 +1580,7 @@ test "noise" { test "uniform sampling" { var seed: [32]u8 = undefined; for (&seed, 0..) 
|*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } try testing.expectEqual(Poly.uniform(seed, 1, 0).cs, .{ 797, 993, 161, 6, 2608, 2385, 2096, 2661, 1676, 247, 2440, @@ -1623,17 +1623,17 @@ test "Test inner PKE" { var seed: [32]u8 = undefined; var pt: [32]u8 = undefined; for (&seed, &pt, 0..) |*s, *p, i| { - s.* = @intCast(u8, i); - p.* = @intCast(u8, i + 32); + s.* = @as(u8, @intCast(i)); + p.* = @as(u8, @intCast(i + 32)); } inline for (modes) |mode| { for (0..100) |i| { var pk: mode.InnerPk = undefined; var sk: mode.InnerSk = undefined; - seed[0] = @intCast(u8, i); + seed[0] = @as(u8, @intCast(i)); mode.innerKeyFromSeed(seed, &pk, &sk); for (0..10) |j| { - seed[1] = @intCast(u8, j); + seed[1] = @as(u8, @intCast(j)); try testing.expectEqual(sk.decrypt(&pk.encrypt(&pt, &seed)), pt); } } @@ -1643,18 +1643,18 @@ test "Test inner PKE" { test "Test happy flow" { var seed: [64]u8 = undefined; for (&seed, 0..) |*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } inline for (modes) |mode| { for (0..100) |i| { - seed[0] = @intCast(u8, i); + seed[0] = @as(u8, @intCast(i)); const kp = try mode.KeyPair.create(seed); const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes()); try testing.expectEqual(sk, kp.secret_key); const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes()); try testing.expectEqual(pk, kp.public_key); for (0..10) |j| { - seed[1] = @intCast(u8, j); + seed[1] = @as(u8, @intCast(j)); const e = pk.encaps(seed[0..32].*); try testing.expectEqual(e.shared_secret, try sk.decaps(&e.ciphertext)); } @@ -1675,7 +1675,7 @@ test "NIST KAT test" { const mode = modeHash[0]; var seed: [48]u8 = undefined; for (&seed, 0..) 
|*s, i| { - s.* = @intCast(u8, i); + s.* = @as(u8, @intCast(i)); } var f = sha2.Sha256.init(.{}); const fw = f.writer(); diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig index bd4a78c03299..b480cbcd8ee4 100644 --- a/lib/std/crypto/md5.zig +++ b/lib/std/crypto/md5.zig @@ -80,7 +80,7 @@ pub const Md5 = struct { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b_slice.len); + d.buf_len += @as(u8, @intCast(b_slice.len)); // Md5 uses the bottom 64-bits for length padding d.total_len +%= b.len; @@ -103,9 +103,9 @@ pub const Md5 = struct { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[56] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[56] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[56 + i] = @intCast(u8, len & 0xff); + d.buf[56 + i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig index 115fd38b3da8..2e0318369b83 100644 --- a/lib/std/crypto/pbkdf2.zig +++ b/lib/std/crypto/pbkdf2.zig @@ -74,7 +74,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com // block // - const blocks_count = @intCast(u32, std.math.divCeil(usize, dk_len, h_len) catch unreachable); + const blocks_count = @as(u32, @intCast(std.math.divCeil(usize, dk_len, h_len) catch unreachable)); var r = dk_len % h_len; if (r == 0) { r = h_len; diff --git a/lib/std/crypto/pcurves/common.zig b/lib/std/crypto/pcurves/common.zig index 5d41bc190a08..edc437517c32 100644 --- a/lib/std/crypto/pcurves/common.zig +++ b/lib/std/crypto/pcurves/common.zig @@ -120,7 +120,7 @@ pub fn Field(comptime params: FieldParams) type { /// Return true if the element is odd. 
pub fn isOdd(fe: Fe) bool { const s = fe.toBytes(.Little); - return @truncate(u1, s[0]) != 0; + return @as(u1, @truncate(s[0])) != 0; } /// Conditonally replace a field element with `a` if `c` is positive. @@ -179,7 +179,7 @@ pub fn Field(comptime params: FieldParams) type { var x: T = n; var t = a; while (true) { - if (@truncate(u1, x) != 0) fe = fe.mul(t); + if (@as(u1, @truncate(x)) != 0) fe = fe.mul(t); x >>= 1; if (x == 0) break; t = t.sq(); @@ -233,7 +233,7 @@ pub fn Field(comptime params: FieldParams) type { } var v_opp: Limbs = undefined; fiat.opp(&v_opp, v); - fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (@bitSizeOf(Word) - 1)), v, v_opp); + fiat.selectznz(&v, @as(u1, @truncate(f[f.len - 1] >> (@bitSizeOf(Word) - 1))), v, v_opp); const precomp = blk: { var precomp: Limbs = undefined; diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig index a797fbce3e2d..668c0115b242 100644 --- a/lib/std/crypto/pcurves/p256.zig +++ b/lib/std/crypto/pcurves/p256.zig @@ -318,7 +318,7 @@ pub const P256 = struct { var t = P256.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -326,8 +326,8 @@ pub const P256 = struct { fn slide(s: [32]u8) [2 * 32 + 1]i8 { var e: [2 * 32 + 1]i8 = undefined; for (s, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -351,9 +351,9 @@ pub const P256 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -366,7 +366,7 @@ pub const P256 = struct { var q = P256.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -445,15 +445,15 @@ pub const P256 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/p256/p256_64.zig b/lib/std/crypto/pcurves/p256/p256_64.zig index e8ba37e845e0..e8dbaead3384 100644 --- a/lib/std/crypto/pcurves/p256/p256_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_64.zig @@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* 
= @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. @@ -1355,62 +1355,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & @as(u64, 0xff))); + const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff)))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & @as(u64, 0xff))); + const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff)))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & @as(u64, 0xff))); + const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff)))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & @as(u64, 0xff))); + const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff)))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & @as(u64, 0xff))); + const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff)))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & @as(u64, 0xff))); + const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff)))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & @as(u64, 0xff))); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & @as(u64, 0xff))); + const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff)))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff)))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & @as(u64, 0xff))); + const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff)))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & @as(u64, 0xff))); + const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff)))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & @as(u64, 0xff))); + const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff)))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & @as(u64, 0xff))); + const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff)))); const x28 = (x26 >> 
8); - const x29 = @truncate(u8, (x28 & @as(u64, 0xff))); + const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff)))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & @as(u64, 0xff))); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & @as(u64, 0xff))); + const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff)))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff)))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & @as(u64, 0xff))); + const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff)))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & @as(u64, 0xff))); + const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff)))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & @as(u64, 0xff))); + const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff)))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & @as(u64, 0xff))); + const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff)))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & @as(u64, 0xff))); + const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff)))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & @as(u64, 0xff))); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & @as(u64, 0xff))); + const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff)))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff)))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & @as(u64, 0xff))); + const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff)))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & @as(u64, 0xff))); + const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff)))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & @as(u64, 0xff))); + const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff)))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & @as(u64, 0xff))); + const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff)))); const x56 
= (x54 >> 8); - const x57 = @truncate(u8, (x56 & @as(u64, 0xff))); + const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff)))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & @as(u64, 0xff))); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff)))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1593,7 +1593,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1)); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1)))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1))))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1)); @@ -1707,7 +1707,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & @as(u64, 0x1))); + const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1)))); var x75: u64 = undefined; cmovznzU64(&x75, x74, @as(u64, 0x0), x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig index ea102360cf8c..152c2b878784 100644 --- a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig @@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -1559,62 +1559,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & @as(u64, 0xff))); + const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff)))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & @as(u64, 0xff))); + const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff)))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & @as(u64, 0xff))); + const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff)))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & @as(u64, 0xff))); + const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff)))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & @as(u64, 0xff))); + const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff)))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & @as(u64, 0xff))); + const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff)))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & @as(u64, 0xff))); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & @as(u64, 0xff))); + const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff)))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff)))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & @as(u64, 0xff))); + const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff)))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & @as(u64, 0xff))); + const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff)))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & @as(u64, 0xff))); + const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff)))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & @as(u64, 0xff))); + const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff)))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & @as(u64, 0xff))); + const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff)))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & @as(u64, 0xff))); - const 
x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & @as(u64, 0xff))); + const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff)))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff)))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & @as(u64, 0xff))); + const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff)))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & @as(u64, 0xff))); + const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff)))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & @as(u64, 0xff))); + const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff)))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & @as(u64, 0xff))); + const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff)))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & @as(u64, 0xff))); + const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff)))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & @as(u64, 0xff))); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & @as(u64, 0xff))); + const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff)))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff)))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & @as(u64, 0xff))); + const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff)))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & @as(u64, 0xff))); + const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff)))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & @as(u64, 0xff))); + const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff)))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & @as(u64, 0xff))); + const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff)))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & @as(u64, 0xff))); + const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff)))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & @as(u64, 
0xff))); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff)))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1797,7 +1797,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1)); - const x3 = @truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1))); + const x3 = @as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1)); @@ -1911,7 +1911,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & @as(u64, 0x1))); + const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1)))); var x75: u64 = undefined; cmovznzU64(&x75, x74, @as(u64, 0x0), x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig index 3d96592f50cb..d5afd6eb4ddd 100644 --- a/lib/std/crypto/pcurves/p384.zig +++ b/lib/std/crypto/pcurves/p384.zig @@ -318,7 +318,7 @@ pub const P384 = struct { var t = P384.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -326,8 +326,8 @@ pub const P384 = struct { fn slide(s: [48]u8) [2 * 48 + 1]i8 { var e: [2 * 48 + 1]i8 = undefined; for (s, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -351,9 +351,9 @@ pub const P384 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -366,7 +366,7 @@ pub const P384 = struct { var q = P384.identityElement; var pos: usize = 380; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -445,15 +445,15 @@ pub const P384 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/p384/p384_64.zig b/lib/std/crypto/pcurves/p384/p384_64.zig index 45c12835b345..f25a7d65b5ae 100644 --- a/lib/std/crypto/pcurves/p384/p384_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = 
@truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. @@ -2928,90 +2928,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void { const x4 = (arg1[2]); const x5 = (arg1[1]); const x6 = (arg1[0]); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); const x18 = (x16 >> 8); - const x19 = @truncate(u8, (x18 & 0xff)); - const x20 = @truncate(u8, (x18 >> 8)); - const x21 = @truncate(u8, (x5 & 0xff)); + const x19 = @as(u8, @truncate((x18 & 0xff))); + const x20 = @as(u8, @truncate((x18 >> 8))); + const x21 = @as(u8, @truncate((x5 & 0xff))); const x22 = (x5 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); const x32 = (x30 >> 8); - const x33 = @truncate(u8, (x32 & 0xff)); - const x34 = @truncate(u8, (x32 >> 8)); - const x35 = @truncate(u8, (x4 & 
0xff)); + const x33 = @as(u8, @truncate((x32 & 0xff))); + const x34 = @as(u8, @truncate((x32 >> 8))); + const x35 = @as(u8, @truncate((x4 & 0xff))); const x36 = (x4 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); const x46 = (x44 >> 8); - const x47 = @truncate(u8, (x46 & 0xff)); - const x48 = @truncate(u8, (x46 >> 8)); - const x49 = @truncate(u8, (x3 & 0xff)); + const x47 = @as(u8, @truncate((x46 & 0xff))); + const x48 = @as(u8, @truncate((x46 >> 8))); + const x49 = @as(u8, @truncate((x3 & 0xff))); const x50 = (x3 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); + const x59 = @as(u8, @truncate((x58 & 0xff))); const x60 = (x58 >> 8); - const x61 = @truncate(u8, (x60 & 0xff)); - const x62 = @truncate(u8, (x60 >> 8)); - const x63 = @truncate(u8, (x2 & 0xff)); + const x61 = @as(u8, @truncate((x60 & 0xff))); + const x62 = @as(u8, @truncate((x60 >> 8))); + const x63 = @as(u8, @truncate((x2 & 0xff))); const x64 = (x2 >> 8); - const x65 = @truncate(u8, (x64 & 0xff)); + const x65 = @as(u8, @truncate((x64 & 0xff))); const 
x66 = (x64 >> 8); - const x67 = @truncate(u8, (x66 & 0xff)); + const x67 = @as(u8, @truncate((x66 & 0xff))); const x68 = (x66 >> 8); - const x69 = @truncate(u8, (x68 & 0xff)); + const x69 = @as(u8, @truncate((x68 & 0xff))); const x70 = (x68 >> 8); - const x71 = @truncate(u8, (x70 & 0xff)); + const x71 = @as(u8, @truncate((x70 & 0xff))); const x72 = (x70 >> 8); - const x73 = @truncate(u8, (x72 & 0xff)); + const x73 = @as(u8, @truncate((x72 & 0xff))); const x74 = (x72 >> 8); - const x75 = @truncate(u8, (x74 & 0xff)); - const x76 = @truncate(u8, (x74 >> 8)); - const x77 = @truncate(u8, (x1 & 0xff)); + const x75 = @as(u8, @truncate((x74 & 0xff))); + const x76 = @as(u8, @truncate((x74 >> 8))); + const x77 = @as(u8, @truncate((x1 & 0xff))); const x78 = (x1 >> 8); - const x79 = @truncate(u8, (x78 & 0xff)); + const x79 = @as(u8, @truncate((x78 & 0xff))); const x80 = (x78 >> 8); - const x81 = @truncate(u8, (x80 & 0xff)); + const x81 = @as(u8, @truncate((x80 & 0xff))); const x82 = (x80 >> 8); - const x83 = @truncate(u8, (x82 & 0xff)); + const x83 = @as(u8, @truncate((x82 & 0xff))); const x84 = (x82 >> 8); - const x85 = @truncate(u8, (x84 & 0xff)); + const x85 = @as(u8, @truncate((x84 & 0xff))); const x86 = (x84 >> 8); - const x87 = @truncate(u8, (x86 & 0xff)); + const x87 = @as(u8, @truncate((x86 & 0xff))); const x88 = (x86 >> 8); - const x89 = @truncate(u8, (x88 & 0xff)); - const x90 = @truncate(u8, (x88 >> 8)); + const x89 = @as(u8, @truncate((x88 & 0xff))); + const x90 = @as(u8, @truncate((x88 >> 8))); out1[0] = x7; out1[1] = x9; out1[2] = x11; @@ -3246,7 +3246,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, 
(~arg1), 0x1); @@ -3408,7 +3408,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ cmovznzU64(&x102, x3, (arg5[4]), x94); var x103: u64 = undefined; cmovznzU64(&x103, x3, (arg5[5]), x96); - const x104 = @truncate(u1, (x28 & 0x1)); + const x104 = @as(u1, @truncate((x28 & 0x1))); var x105: u64 = undefined; cmovznzU64(&x105, x104, 0x0, x7); var x106: u64 = undefined; diff --git a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig index 0ce7727148fc..fc787ba7b952 100644 --- a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. 
@@ -2982,90 +2982,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void { const x4 = (arg1[2]); const x5 = (arg1[1]); const x6 = (arg1[0]); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); const x18 = (x16 >> 8); - const x19 = @truncate(u8, (x18 & 0xff)); - const x20 = @truncate(u8, (x18 >> 8)); - const x21 = @truncate(u8, (x5 & 0xff)); + const x19 = @as(u8, @truncate((x18 & 0xff))); + const x20 = @as(u8, @truncate((x18 >> 8))); + const x21 = @as(u8, @truncate((x5 & 0xff))); const x22 = (x5 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); const x32 = (x30 >> 8); - const x33 = @truncate(u8, (x32 & 0xff)); - const x34 = @truncate(u8, (x32 >> 8)); - const x35 = @truncate(u8, (x4 & 0xff)); + const x33 = @as(u8, @truncate((x32 & 0xff))); + const x34 = @as(u8, @truncate((x32 >> 8))); + const x35 = @as(u8, @truncate((x4 & 0xff))); const x36 = (x4 >> 8); - const x37 = 
@truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); const x46 = (x44 >> 8); - const x47 = @truncate(u8, (x46 & 0xff)); - const x48 = @truncate(u8, (x46 >> 8)); - const x49 = @truncate(u8, (x3 & 0xff)); + const x47 = @as(u8, @truncate((x46 & 0xff))); + const x48 = @as(u8, @truncate((x46 >> 8))); + const x49 = @as(u8, @truncate((x3 & 0xff))); const x50 = (x3 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); + const x59 = @as(u8, @truncate((x58 & 0xff))); const x60 = (x58 >> 8); - const x61 = @truncate(u8, (x60 & 0xff)); - const x62 = @truncate(u8, (x60 >> 8)); - const x63 = @truncate(u8, (x2 & 0xff)); + const x61 = @as(u8, @truncate((x60 & 0xff))); + const x62 = @as(u8, @truncate((x60 >> 8))); + const x63 = @as(u8, @truncate((x2 & 0xff))); const x64 = (x2 >> 8); - const x65 = @truncate(u8, (x64 & 0xff)); + const x65 = @as(u8, @truncate((x64 & 0xff))); const x66 = (x64 >> 8); - const x67 = @truncate(u8, (x66 & 0xff)); + const x67 = @as(u8, @truncate((x66 & 0xff))); const x68 = (x66 >> 8); - const x69 = @truncate(u8, (x68 & 0xff)); + const 
x69 = @as(u8, @truncate((x68 & 0xff))); const x70 = (x68 >> 8); - const x71 = @truncate(u8, (x70 & 0xff)); + const x71 = @as(u8, @truncate((x70 & 0xff))); const x72 = (x70 >> 8); - const x73 = @truncate(u8, (x72 & 0xff)); + const x73 = @as(u8, @truncate((x72 & 0xff))); const x74 = (x72 >> 8); - const x75 = @truncate(u8, (x74 & 0xff)); - const x76 = @truncate(u8, (x74 >> 8)); - const x77 = @truncate(u8, (x1 & 0xff)); + const x75 = @as(u8, @truncate((x74 & 0xff))); + const x76 = @as(u8, @truncate((x74 >> 8))); + const x77 = @as(u8, @truncate((x1 & 0xff))); const x78 = (x1 >> 8); - const x79 = @truncate(u8, (x78 & 0xff)); + const x79 = @as(u8, @truncate((x78 & 0xff))); const x80 = (x78 >> 8); - const x81 = @truncate(u8, (x80 & 0xff)); + const x81 = @as(u8, @truncate((x80 & 0xff))); const x82 = (x80 >> 8); - const x83 = @truncate(u8, (x82 & 0xff)); + const x83 = @as(u8, @truncate((x82 & 0xff))); const x84 = (x82 >> 8); - const x85 = @truncate(u8, (x84 & 0xff)); + const x85 = @as(u8, @truncate((x84 & 0xff))); const x86 = (x84 >> 8); - const x87 = @truncate(u8, (x86 & 0xff)); + const x87 = @as(u8, @truncate((x86 & 0xff))); const x88 = (x86 >> 8); - const x89 = @truncate(u8, (x88 & 0xff)); - const x90 = @truncate(u8, (x88 >> 8)); + const x89 = @as(u8, @truncate((x88 & 0xff))); + const x90 = @as(u8, @truncate((x88 >> 8))); out1[0] = x7; out1[1] = x9; out1[2] = x11; @@ -3300,7 +3300,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -3462,7 +3462,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[ cmovznzU64(&x102, x3, (arg5[4]), x94); var x103: u64 = undefined; 
cmovznzU64(&x103, x3, (arg5[5]), x96); - const x104 = @truncate(u1, (x28 & 0x1)); + const x104 = @as(u1, @truncate((x28 & 0x1))); var x105: u64 = undefined; cmovznzU64(&x105, x104, 0x0, x7); var x106: u64 = undefined; diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig index f0b086f9744a..cd7f1faf755c 100644 --- a/lib/std/crypto/pcurves/secp256k1.zig +++ b/lib/std/crypto/pcurves/secp256k1.zig @@ -67,8 +67,8 @@ pub const Secp256k1 = struct { const t1 = math.mulWide(u256, k, 21949224512762693861512883645436906316123769664773102907882521278123970637873); const t2 = math.mulWide(u256, k, 103246583619904461035481197785446227098457807945486720222659797044629401272177); - const c1 = @truncate(u128, t1 >> 384) + @truncate(u1, t1 >> 383); - const c2 = @truncate(u128, t2 >> 384) + @truncate(u1, t2 >> 383); + const c1 = @as(u128, @truncate(t1 >> 384)) + @as(u1, @truncate(t1 >> 383)); + const c2 = @as(u128, @truncate(t2 >> 384)) + @as(u1, @truncate(t2 >> 383)); var buf: [32]u8 = undefined; @@ -346,7 +346,7 @@ pub const Secp256k1 = struct { var t = Secp256k1.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { - t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8)); + t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8))); } return t; } @@ -354,8 +354,8 @@ pub const Secp256k1 = struct { fn slide(s: [32]u8) [2 * 32 + 1]i8 { var e: [2 * 32 + 1]i8 = undefined; for (s, 0..) 
|x, i| { - e[i * 2 + 0] = @as(i8, @truncate(u4, x)); - e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4)); + e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x))); + e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4))); } // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7 var carry: i8 = 0; @@ -379,9 +379,9 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot = e[pos]; if (slot > 0) { - q = q.add(pc[@intCast(usize, slot)]); + q = q.add(pc[@as(usize, @intCast(slot))]); } else if (slot < 0) { - q = q.sub(pc[@intCast(usize, -slot)]); + q = q.sub(pc[@as(usize, @intCast(-slot))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -394,7 +394,7 @@ pub const Secp256k1 = struct { var q = Secp256k1.identityElement; var pos: usize = 252; while (true) : (pos -= 4) { - const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos))); + const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos))))); if (vartime) { if (slot != 0) { q = q.add(pc[slot]); @@ -482,15 +482,15 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); @@ -523,15 +523,15 @@ pub const Secp256k1 = struct { while (true) : (pos -= 1) { const slot1 = e1[pos]; if (slot1 > 0) { - q = q.add(pc1[@intCast(usize, slot1)]); + q = q.add(pc1[@as(usize, @intCast(slot1))]); } else if (slot1 < 0) { - q = q.sub(pc1[@intCast(usize, -slot1)]); + q = q.sub(pc1[@as(usize, @intCast(-slot1))]); } const slot2 = e2[pos]; if (slot2 > 0) { - q = 
q.add(pc2[@intCast(usize, slot2)]); + q = q.add(pc2[@as(usize, @intCast(slot2))]); } else if (slot2 < 0) { - q = q.sub(pc2[@intCast(usize, -slot2)]); + q = q.sub(pc2[@as(usize, @intCast(-slot2))]); } if (pos == 0) break; q = q.dbl().dbl().dbl().dbl(); diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig index 5643ea88d509..ae3e97c61989 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a single-word conditional move. @@ -1488,62 +1488,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & 0xff)); + const x5 = @as(u8, @truncate((x4 & 0xff))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & 0xff))); const x20 = (x3 >> 8); 
- const x21 = @truncate(u8, (x20 & 0xff)); + const x21 = @as(u8, @truncate((x20 & 0xff))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & 0xff))); const x34 = (x2 >> 8); - const x35 = @truncate(u8, (x34 & 0xff)); + const x35 = @as(u8, @truncate((x34 & 0xff))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & 0xff))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & 0xff)); + const x49 = @as(u8, @truncate((x48 & 0xff))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 
0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & 0xff))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1726,7 +1726,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -1840,7 +1840,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & 0x1)); + const x74 = @as(u1, @truncate((x22 & 0x1))); var x75: u64 = undefined; cmovznzU64(&x75, x74, 0x0, x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig index aca1bd3063bb..12c833bb334f 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig @@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); - out1.* = @truncate(u64, x); - out2.* = @truncate(u64, x >> 64); + out1.* = @as(u64, @truncate(x)); + out2.* = @as(u64, @truncate(x >> 64)); } /// The function cmovznzU64 is a 
single-word conditional move. @@ -1548,62 +1548,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void { const x2 = (arg1[2]); const x3 = (arg1[1]); const x4 = (arg1[0]); - const x5 = @truncate(u8, (x4 & 0xff)); + const x5 = @as(u8, @truncate((x4 & 0xff))); const x6 = (x4 >> 8); - const x7 = @truncate(u8, (x6 & 0xff)); + const x7 = @as(u8, @truncate((x6 & 0xff))); const x8 = (x6 >> 8); - const x9 = @truncate(u8, (x8 & 0xff)); + const x9 = @as(u8, @truncate((x8 & 0xff))); const x10 = (x8 >> 8); - const x11 = @truncate(u8, (x10 & 0xff)); + const x11 = @as(u8, @truncate((x10 & 0xff))); const x12 = (x10 >> 8); - const x13 = @truncate(u8, (x12 & 0xff)); + const x13 = @as(u8, @truncate((x12 & 0xff))); const x14 = (x12 >> 8); - const x15 = @truncate(u8, (x14 & 0xff)); + const x15 = @as(u8, @truncate((x14 & 0xff))); const x16 = (x14 >> 8); - const x17 = @truncate(u8, (x16 & 0xff)); - const x18 = @truncate(u8, (x16 >> 8)); - const x19 = @truncate(u8, (x3 & 0xff)); + const x17 = @as(u8, @truncate((x16 & 0xff))); + const x18 = @as(u8, @truncate((x16 >> 8))); + const x19 = @as(u8, @truncate((x3 & 0xff))); const x20 = (x3 >> 8); - const x21 = @truncate(u8, (x20 & 0xff)); + const x21 = @as(u8, @truncate((x20 & 0xff))); const x22 = (x20 >> 8); - const x23 = @truncate(u8, (x22 & 0xff)); + const x23 = @as(u8, @truncate((x22 & 0xff))); const x24 = (x22 >> 8); - const x25 = @truncate(u8, (x24 & 0xff)); + const x25 = @as(u8, @truncate((x24 & 0xff))); const x26 = (x24 >> 8); - const x27 = @truncate(u8, (x26 & 0xff)); + const x27 = @as(u8, @truncate((x26 & 0xff))); const x28 = (x26 >> 8); - const x29 = @truncate(u8, (x28 & 0xff)); + const x29 = @as(u8, @truncate((x28 & 0xff))); const x30 = (x28 >> 8); - const x31 = @truncate(u8, (x30 & 0xff)); - const x32 = @truncate(u8, (x30 >> 8)); - const x33 = @truncate(u8, (x2 & 0xff)); + const x31 = @as(u8, @truncate((x30 & 0xff))); + const x32 = @as(u8, @truncate((x30 >> 8))); + const x33 = @as(u8, @truncate((x2 & 0xff))); const x34 = (x2 >> 8); 
- const x35 = @truncate(u8, (x34 & 0xff)); + const x35 = @as(u8, @truncate((x34 & 0xff))); const x36 = (x34 >> 8); - const x37 = @truncate(u8, (x36 & 0xff)); + const x37 = @as(u8, @truncate((x36 & 0xff))); const x38 = (x36 >> 8); - const x39 = @truncate(u8, (x38 & 0xff)); + const x39 = @as(u8, @truncate((x38 & 0xff))); const x40 = (x38 >> 8); - const x41 = @truncate(u8, (x40 & 0xff)); + const x41 = @as(u8, @truncate((x40 & 0xff))); const x42 = (x40 >> 8); - const x43 = @truncate(u8, (x42 & 0xff)); + const x43 = @as(u8, @truncate((x42 & 0xff))); const x44 = (x42 >> 8); - const x45 = @truncate(u8, (x44 & 0xff)); - const x46 = @truncate(u8, (x44 >> 8)); - const x47 = @truncate(u8, (x1 & 0xff)); + const x45 = @as(u8, @truncate((x44 & 0xff))); + const x46 = @as(u8, @truncate((x44 >> 8))); + const x47 = @as(u8, @truncate((x1 & 0xff))); const x48 = (x1 >> 8); - const x49 = @truncate(u8, (x48 & 0xff)); + const x49 = @as(u8, @truncate((x48 & 0xff))); const x50 = (x48 >> 8); - const x51 = @truncate(u8, (x50 & 0xff)); + const x51 = @as(u8, @truncate((x50 & 0xff))); const x52 = (x50 >> 8); - const x53 = @truncate(u8, (x52 & 0xff)); + const x53 = @as(u8, @truncate((x52 & 0xff))); const x54 = (x52 >> 8); - const x55 = @truncate(u8, (x54 & 0xff)); + const x55 = @as(u8, @truncate((x54 & 0xff))); const x56 = (x54 >> 8); - const x57 = @truncate(u8, (x56 & 0xff)); + const x57 = @as(u8, @truncate((x56 & 0xff))); const x58 = (x56 >> 8); - const x59 = @truncate(u8, (x58 & 0xff)); - const x60 = @truncate(u8, (x58 >> 8)); + const x59 = @as(u8, @truncate((x58 & 0xff))); + const x60 = @as(u8, @truncate((x58 >> 8))); out1[0] = x5; out1[1] = x7; out1[2] = x9; @@ -1786,7 +1786,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ var x1: u64 = undefined; var x2: u1 = undefined; addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1); - const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1))); + const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, 
@truncate(((arg3[0]) & 0x1)))); var x4: u64 = undefined; var x5: u1 = undefined; addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1); @@ -1900,7 +1900,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[ cmovznzU64(&x72, x3, (arg5[2]), x66); var x73: u64 = undefined; cmovznzU64(&x73, x3, (arg5[3]), x68); - const x74 = @truncate(u1, (x22 & 0x1)); + const x74 = @as(u1, @truncate((x22 & 0x1))); var x75: u64 = undefined; cmovznzU64(&x75, x74, 0x0, x7); var x76: u64 = undefined; diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig index 1eeee39a5a10..fecd7f1239f9 100644 --- a/lib/std/crypto/phc_encoding.zig +++ b/lib/std/crypto/phc_encoding.zig @@ -193,7 +193,7 @@ pub fn serialize(params: anytype, str: []u8) Error![]const u8 { pub fn calcSize(params: anytype) usize { var buf = io.countingWriter(io.null_writer); serializeTo(params, buf.writer()) catch unreachable; - return @intCast(usize, buf.bytes_written); + return @as(usize, @intCast(buf.bytes_written)); } fn serializeTo(params: anytype, out: anytype) !void { diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index 51e1c2ab2492..5bcb75169dca 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -76,12 +76,12 @@ pub const Poly1305 = struct { const m1 = h1r0 +% h0r1; const m2 = h2r0 +% h1r1; - const t0 = @truncate(u64, m0); - v = @addWithOverflow(@truncate(u64, m1), @truncate(u64, m0 >> 64)); + const t0 = @as(u64, @truncate(m0)); + v = @addWithOverflow(@as(u64, @truncate(m1)), @as(u64, @truncate(m0 >> 64))); const t1 = v[0]; - v = add(@truncate(u64, m2), @truncate(u64, m1 >> 64), v[1]); + v = add(@as(u64, @truncate(m2)), @as(u64, @truncate(m1 >> 64)), v[1]); const t2 = v[0]; - v = add(@truncate(u64, m3), @truncate(u64, m2 >> 64), v[1]); + v = add(@as(u64, @truncate(m3)), @as(u64, @truncate(m2 >> 64)), v[1]); const t3 = v[0]; // Partial reduction @@ -98,9 +98,9 @@ pub const Poly1305 = struct { h1 = v[0]; h2 +%= v[1]; const cc = 
(cclo | (@as(u128, cchi) << 64)) >> 2; - v = @addWithOverflow(h0, @truncate(u64, cc)); + v = @addWithOverflow(h0, @as(u64, @truncate(cc))); h0 = v[0]; - v = add(h1, @truncate(u64, cc >> 64), v[1]); + v = add(h1, @as(u64, @truncate(cc >> 64)), v[1]); h1 = v[0]; h2 +%= v[1]; } @@ -185,7 +185,7 @@ pub const Poly1305 = struct { mem.writeIntLittle(u64, out[0..8], st.h[0]); mem.writeIntLittle(u64, out[8..16], st.h[1]); - utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Poly1305)]); + utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Poly1305)]); } pub fn create(out: *[mac_length]u8, msg: []const u8, key: *const [key_length]u8) void { diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index c8a639ad0b14..231f9410997a 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -337,8 +337,8 @@ pub fn Salsa(comptime rounds: comptime_int) type { var d: [4]u32 = undefined; d[0] = mem.readIntLittle(u32, nonce[0..4]); d[1] = mem.readIntLittle(u32, nonce[4..8]); - d[2] = @truncate(u32, counter); - d[3] = @truncate(u32, counter >> 32); + d[2] = @as(u32, @truncate(counter)); + d[3] = @as(u32, @truncate(counter >> 32)); SalsaImpl(rounds).salsaXor(out, in, keyToWords(key), d); } }; diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index 97dd9b95d0c7..8745a3b34e15 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -73,11 +73,11 @@ fn salsaXor(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) } fn blockMix(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) u32, r: u30) void { - blockCopy(tmp, @alignCast(16, in[(2 * r - 1) * 16 ..]), 1); + blockCopy(tmp, @alignCast(in[(2 * r - 1) * 16 ..]), 1); var i: usize = 0; while (i < 2 * r) : (i += 2) { - salsaXor(tmp, @alignCast(16, in[i * 16 ..]), @alignCast(16, out[i * 8 ..])); - salsaXor(tmp, @alignCast(16, in[i * 16 + 16 ..]), @alignCast(16, out[i * 8 + r * 16 ..])); + salsaXor(tmp, @alignCast(in[i * 16 ..]), 
@alignCast(out[i * 8 ..])); + salsaXor(tmp, @alignCast(in[i * 16 + 16 ..]), @alignCast(out[i * 8 + r * 16 ..])); } } @@ -87,8 +87,8 @@ fn integerify(b: []align(16) const u32, r: u30) u64 { } fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) u32) void { - var x = @alignCast(16, xy[0 .. 32 * r]); - var y = @alignCast(16, xy[32 * r ..]); + var x: []align(16) u32 = @alignCast(xy[0 .. 32 * r]); + var y: []align(16) u32 = @alignCast(xy[32 * r ..]); for (x, 0..) |*v1, j| { v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]); @@ -97,21 +97,21 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) var tmp: [16]u32 align(16) = undefined; var i: usize = 0; while (i < n) : (i += 2) { - blockCopy(@alignCast(16, v[i * (32 * r) ..]), x, 2 * r); + blockCopy(@alignCast(v[i * (32 * r) ..]), x, 2 * r); blockMix(&tmp, x, y, r); - blockCopy(@alignCast(16, v[(i + 1) * (32 * r) ..]), y, 2 * r); + blockCopy(@alignCast(v[(i + 1) * (32 * r) ..]), y, 2 * r); blockMix(&tmp, y, x, r); } i = 0; while (i < n) : (i += 2) { - var j = @intCast(usize, integerify(x, r) & (n - 1)); - blockXor(x, @alignCast(16, v[j * (32 * r) ..]), 2 * r); + var j = @as(usize, @intCast(integerify(x, r) & (n - 1))); + blockXor(x, @alignCast(v[j * (32 * r) ..]), 2 * r); blockMix(&tmp, x, y, r); - j = @intCast(usize, integerify(y, r) & (n - 1)); - blockXor(y, @alignCast(16, v[j * (32 * r) ..]), 2 * r); + j = @as(usize, @intCast(integerify(y, r) & (n - 1))); + blockXor(y, @alignCast(v[j * (32 * r) ..]), 2 * r); blockMix(&tmp, y, x, r); } @@ -147,12 +147,12 @@ pub const Params = struct { const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); - return Self{ .r = r, .p = 1, .ln = @intCast(u6, math.log2(max_n)) }; + return Self{ .r = r, .p = 1, .ln = @as(u6, @intCast(math.log2(max_n))) }; } else { - const max_n = mem_limit / (@intCast(usize, r) * 128); - const ln = @intCast(u6, math.log2(max_n)); + const max_n = mem_limit / (@as(usize, @intCast(r)) * 
128); + const ln = @as(u6, @intCast(math.log2(max_n))); const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); - return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; + return Self{ .r = r, .p = @as(u30, @intCast(max_rp / @as(u64, r))), .ln = ln }; } } }; @@ -185,7 +185,7 @@ pub fn kdf( const n64 = @as(u64, 1) << params.ln; if (n64 > max_size) return KdfError.WeakParameters; - const n = @intCast(usize, n64); + const n = @as(usize, @intCast(n64)); if (@as(u64, params.r) * @as(u64, params.p) >= 1 << 30 or params.r > max_int / 128 / @as(u64, params.p) or params.r > max_int / 256 or @@ -201,7 +201,7 @@ pub fn kdf( try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256); var i: u32 = 0; while (i < params.p) : (i += 1) { - smix(@alignCast(16, dk[i * 128 * params.r ..]), params.r, n, v, xy); + smix(@alignCast(dk[i * 128 * params.r ..]), params.r, n, v, xy); } try pwhash.pbkdf2(derived_key, password, dk, 1, HmacSha256); } @@ -309,7 +309,7 @@ const crypt_format = struct { pub fn calcSize(params: anytype) usize { var buf = io.countingWriter(io.null_writer); serializeTo(params, buf.writer()) catch unreachable; - return @intCast(usize, buf.bytes_written); + return @as(usize, @intCast(buf.bytes_written)); } fn serializeTo(params: anytype, out: anytype) !void { @@ -343,7 +343,7 @@ const crypt_format = struct { fn intEncode(dst: []u8, src: anytype) void { var n = src; for (dst) |*x| { - x.* = map64[@truncate(u6, n)]; + x.* = map64[@as(u6, @truncate(n))]; n = math.shr(@TypeOf(src), n, 6); } } @@ -352,7 +352,7 @@ const crypt_format = struct { var v: T = 0; for (src, 0..) |x, i| { const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding; - v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6); + v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6)); } return v; } @@ -366,10 +366,10 @@ const crypt_format = struct { const leftover = src[i * 4 ..]; var v: u24 = 0; for (leftover, 0..) 
|_, j| { - v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6); + v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @as(u5, @intCast(j * 6)); } for (dst[i * 3 ..], 0..) |*x, j| { - x.* = @truncate(u8, v >> @intCast(u5, j * 8)); + x.* = @as(u8, @truncate(v >> @as(u5, @intCast(j * 8)))); } } @@ -382,7 +382,7 @@ const crypt_format = struct { const leftover = src[i * 3 ..]; var v: u24 = 0; for (leftover, 0..) |x, j| { - v |= @as(u24, x) << @intCast(u5, j * 8); + v |= @as(u24, x) << @as(u5, @intCast(j * 8)); } intEncode(dst[i * 4 ..], v); } diff --git a/lib/std/crypto/sha1.zig b/lib/std/crypto/sha1.zig index 1f5f3eaae2a2..82e23e0647af 100644 --- a/lib/std/crypto/sha1.zig +++ b/lib/std/crypto/sha1.zig @@ -75,7 +75,7 @@ pub const Sha1 = struct { // Copy any remainder for next pass. @memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -97,9 +97,9 @@ pub const Sha1 = struct { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[63 - i] = @intCast(u8, len & 0xff); + d.buf[63 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig index bd5a7cc5d4d6..ce543d3906b5 100644 --- a/lib/std/crypto/sha2.zig +++ b/lib/std/crypto/sha2.zig @@ -132,7 +132,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { // Copy any remainder for next pass. const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -159,9 +159,9 @@ fn Sha2x32(comptime params: Sha2Params32) type { // Append message length. 
var i: usize = 1; var len = d.total_len >> 5; - d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 8) : (i += 1) { - d.buf[63 - i] = @intCast(u8, len & 0xff); + d.buf[63 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } @@ -194,7 +194,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { fn round(d: *Self, b: *const [64]u8) void { var s: [64]u32 align(16) = undefined; - for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| { + for (@as(*align(1) const [16]u32, @ptrCast(b)), 0..) |*elem, i| { s[i] = mem.readIntBig(u32, mem.asBytes(elem)); } @@ -203,7 +203,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { .aarch64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) { var x: v4u32 = d.s[0..4].*; var y: v4u32 = d.s[4..8].*; - const s_v = @ptrCast(*[16]v4u32, &s); + const s_v = @as(*[16]v4u32, @ptrCast(&s)); comptime var k: u8 = 0; inline while (k < 16) : (k += 1) { @@ -241,7 +241,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { .x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) { var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] }; var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] }; - const s_v = @ptrCast(*[16]v4u32, &s); + const s_v = @as(*[16]v4u32, @ptrCast(&s)); comptime var k: u8 = 0; inline while (k < 16) : (k += 1) { @@ -273,7 +273,7 @@ fn Sha2x32(comptime params: Sha2Params32) type { : [x] "=x" (-> v4u32), : [_] "0" (x), [y] "x" (y), - [_] "{xmm0}" (@bitCast(v4u32, @bitCast(u128, w) >> 64)), + [_] "{xmm0}" (@as(v4u32, @bitCast(@as(u128, @bitCast(w)) >> 64))), ); } @@ -624,7 +624,7 @@ fn Sha2x64(comptime params: Sha2Params64) type { // Copy any remainder for next pass. 
const b_slice = b[off..]; @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice); - d.buf_len += @intCast(u8, b[off..].len); + d.buf_len += @as(u8, @intCast(b[off..].len)); d.total_len += b.len; } @@ -651,9 +651,9 @@ fn Sha2x64(comptime params: Sha2Params64) type { // Append message length. var i: usize = 1; var len = d.total_len >> 5; - d.buf[127] = @intCast(u8, d.total_len & 0x1f) << 3; + d.buf[127] = @as(u8, @intCast(d.total_len & 0x1f)) << 3; while (i < 16) : (i += 1) { - d.buf[127 - i] = @intCast(u8, len & 0xff); + d.buf[127 - i] = @as(u8, @intCast(len & 0xff)); len >>= 8; } diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 70f4f2fd53a3..439958739718 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ -83,13 +83,13 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round @call(.always_inline, round, .{ self, blob }); } - self.msg_len +%= @truncate(u8, b.len); + self.msg_len +%= @as(u8, @truncate(b.len)); } fn final(self: *Self, b: []const u8) T { std.debug.assert(b.len < 8); - self.msg_len +%= @truncate(u8, b.len); + self.msg_len +%= @as(u8, @truncate(b.len)); var buf = [_]u8{0} ** 8; @memcpy(buf[0..b.len], b); @@ -202,7 +202,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) const b_slice = b[off + aligned_len ..]; @memcpy(self.buf[self.buf_len..][0..b_slice.len], b_slice); - self.buf_len += @intCast(u8, b_slice.len); + self.buf_len += @as(u8, @intCast(b_slice.len)); } pub fn peek(self: Self) [mac_length]u8 { @@ -329,7 +329,7 @@ test "siphash64-2-4 sanity" { var buffer: [64]u8 = undefined; for (vectors, 0..) |vector, i| { - buffer[i] = @intCast(u8, i); + buffer[i] = @as(u8, @intCast(i)); var out: [siphash.mac_length]u8 = undefined; siphash.create(&out, buffer[0..i], test_key); @@ -409,7 +409,7 @@ test "siphash128-2-4 sanity" { var buffer: [64]u8 = undefined; for (vectors, 0..) 
|vector, i| { - buffer[i] = @intCast(u8, i); + buffer[i] = @as(u8, @intCast(i)); var out: [siphash.mac_length]u8 = undefined; siphash.create(&out, buffer[0..i], test_key[0..]); @@ -420,7 +420,7 @@ test "siphash128-2-4 sanity" { test "iterative non-divisible update" { var buf: [1024]u8 = undefined; for (&buf, 0..) |*e, i| { - e.* = @truncate(u8, i); + e.* = @as(u8, @truncate(i)); } const key = "0x128dad08f12307"; diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig index 54a30cfabaf5..344da9745d41 100644 --- a/lib/std/crypto/tlcsprng.zig +++ b/lib/std/crypto/tlcsprng.zig @@ -102,7 +102,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void { wipe_mem = mem.asBytes(&S.buf); } } - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); switch (ctx.init_state) { .uninitialized => { @@ -158,7 +158,7 @@ fn childAtForkHandler() callconv(.C) void { } fn fillWithCsprng(buffer: []u8) void { - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); return ctx.rng.fill(buffer); } @@ -174,7 +174,7 @@ fn initAndFill(buffer: []u8) void { // the `std.options.cryptoRandomSeed` function is provided. 
std.options.cryptoRandomSeed(&seed); - const ctx = @ptrCast(*Context, wipe_mem.ptr); + const ctx = @as(*Context, @ptrCast(wipe_mem.ptr)); ctx.rng = Rng.init(seed); std.crypto.utils.secureZero(u8, &seed); diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig index 4c03c4897333..eb5a6b4c1a30 100644 --- a/lib/std/crypto/tls.zig +++ b/lib/std/crypto/tls.zig @@ -371,12 +371,12 @@ pub fn hkdfExpandLabel( const tls13 = "tls13 "; var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined; mem.writeIntBig(u16, buf[0..2], len); - buf[2] = @intCast(u8, tls13.len + label.len); + buf[2] = @as(u8, @intCast(tls13.len + label.len)); buf[3..][0..tls13.len].* = tls13.*; var i: usize = 3 + tls13.len; @memcpy(buf[i..][0..label.len], label); i += label.len; - buf[i] = @intCast(u8, context.len); + buf[i] = @as(u8, @intCast(context.len)); i += 1; @memcpy(buf[i..][0..context.len], context); i += context.len; @@ -411,24 +411,24 @@ pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeO assert(@sizeOf(E) == 2); var result: [tags.len * 2]u8 = undefined; for (tags, 0..) 
|elem, i| { - result[i * 2] = @truncate(u8, @intFromEnum(elem) >> 8); - result[i * 2 + 1] = @truncate(u8, @intFromEnum(elem)); + result[i * 2] = @as(u8, @truncate(@intFromEnum(elem) >> 8)); + result[i * 2 + 1] = @as(u8, @truncate(@intFromEnum(elem))); } return array(2, result); } pub inline fn int2(x: u16) [2]u8 { return .{ - @truncate(u8, x >> 8), - @truncate(u8, x), + @as(u8, @truncate(x >> 8)), + @as(u8, @truncate(x)), }; } pub inline fn int3(x: u24) [3]u8 { return .{ - @truncate(u8, x >> 16), - @truncate(u8, x >> 8), - @truncate(u8, x), + @as(u8, @truncate(x >> 16)), + @as(u8, @truncate(x >> 8)), + @as(u8, @truncate(x)), }; } @@ -513,7 +513,7 @@ pub const Decoder = struct { .Enum => |info| { const int = d.decode(info.tag_type); if (info.is_exhaustive) @compileError("exhaustive enum cannot be used"); - return @enumFromInt(T, int); + return @as(T, @enumFromInt(int)); }, else => @compileError("unsupported type: " ++ @typeName(T)), } diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 94ecf0d3ef7d..6d5bb86fed0a 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -140,7 +140,7 @@ pub fn InitError(comptime Stream: type) type { /// /// `host` is only borrowed during this function call. 
pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) InitError(@TypeOf(stream))!Client { - const host_len = @intCast(u16, host.len); + const host_len = @as(u16, @intCast(host.len)); var random_buffer: [128]u8 = undefined; crypto.random.bytes(&random_buffer); @@ -194,7 +194,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In int2(host_len); const extensions_header = - int2(@intCast(u16, extensions_payload.len + host_len)) ++ + int2(@as(u16, @intCast(extensions_payload.len + host_len))) ++ extensions_payload; const legacy_compression_methods = 0x0100; @@ -209,13 +209,13 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const out_handshake = [_]u8{@intFromEnum(tls.HandshakeType.client_hello)} ++ - int3(@intCast(u24, client_hello.len + host_len)) ++ + int3(@as(u24, @intCast(client_hello.len + host_len))) ++ client_hello; const plaintext_header = [_]u8{ @intFromEnum(tls.ContentType.handshake), 0x03, 0x01, // legacy_record_version - } ++ int2(@intCast(u16, out_handshake.len + host_len)) ++ out_handshake; + } ++ int2(@as(u16, @intCast(out_handshake.len + host_len))) ++ out_handshake; { var iovecs = [_]std.os.iovec_const{ @@ -457,7 +457,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const auth_tag = record_decoder.array(P.AEAD.tag_length).*; const V = @Vector(P.AEAD.nonce_length, u8); const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, big(read_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(read_seq))); read_seq += 1; const nonce = @as(V, p.server_handshake_iv) ^ operand; P.AEAD.decrypt(cleartext, ciphertext, auth_tag, record_header, nonce, p.server_handshake_key) catch @@ -466,7 +466,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In }, }; - const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]); + const inner_ct = 
@as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1])); if (inner_ct != .handshake) return error.TlsUnexpectedMessage; var ctd = tls.Decoder.fromTheirSlice(cleartext[0 .. cleartext.len - 1]); @@ -520,7 +520,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In const subject_cert: Certificate = .{ .buffer = certd.buf, - .index = @intCast(u32, certd.idx), + .index = @as(u32, @intCast(certd.idx)), }; const subject = try subject_cert.parse(); if (cert_index == 0) { @@ -534,7 +534,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In if (pub_key.len > main_cert_pub_key_buf.len) return error.CertificatePublicKeyInvalid; @memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key); - main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len); + main_cert_pub_key_len = @as(@TypeOf(main_cert_pub_key_len), @intCast(pub_key.len)); } else { try prev_cert.verify(subject, now_sec); } @@ -679,7 +679,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In .write_seq = 0, .partial_cleartext_idx = 0, .partial_ciphertext_idx = 0, - .partial_ciphertext_end = @intCast(u15, leftover.len), + .partial_ciphertext_end = @as(u15, @intCast(leftover.len)), .received_close_notify = false, .application_cipher = app_cipher, .partially_read_buffer = undefined, @@ -797,11 +797,11 @@ fn prepareCiphertextRecord( const overhead_len = tls.record_header_len + P.AEAD.tag_length + 1; const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len; while (true) { - const encrypted_content_len = @intCast(u16, @min( + const encrypted_content_len = @as(u16, @intCast(@min( @min(bytes.len - bytes_i, max_ciphertext_len - 1), ciphertext_buf.len - close_notify_alert_reserved - overhead_len - ciphertext_end, - )); + ))); if (encrypted_content_len == 0) return .{ .iovec_end = iovec_end, .ciphertext_end = ciphertext_end, @@ -826,7 +826,7 @@ fn prepareCiphertextRecord( const auth_tag = 
ciphertext_buf[ciphertext_end..][0..P.AEAD.tag_length]; ciphertext_end += auth_tag.len; const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, big(c.write_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(c.write_seq))); c.write_seq += 1; // TODO send key_update on overflow const nonce = @as(V, p.client_iv) ^ operand; P.AEAD.encrypt(ciphertext, auth_tag, cleartext, ad, nonce, p.client_key); @@ -920,7 +920,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) // Give away the buffered cleartext we have, if any. const partial_cleartext = c.partially_read_buffer[c.partial_cleartext_idx..c.partial_ciphertext_idx]; if (partial_cleartext.len > 0) { - const amt = @intCast(u15, vp.put(partial_cleartext)); + const amt = @as(u15, @intCast(vp.put(partial_cleartext))); c.partial_cleartext_idx += amt; if (c.partial_cleartext_idx == c.partial_ciphertext_idx and @@ -1037,7 +1037,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) in = 0; continue; } - const ct = @enumFromInt(tls.ContentType, frag[in]); + const ct = @as(tls.ContentType, @enumFromInt(frag[in])); in += 1; const legacy_version = mem.readIntBig(u16, frag[in..][0..2]); in += 2; @@ -1070,8 +1070,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) switch (ct) { .alert => { if (in + 2 > frag.len) return error.TlsDecodeError; - const level = @enumFromInt(tls.AlertLevel, frag[in]); - const desc = @enumFromInt(tls.AlertDescription, frag[in + 1]); + const level = @as(tls.AlertLevel, @enumFromInt(frag[in])); + const desc = @as(tls.AlertDescription, @enumFromInt(frag[in + 1])); _ = level; try desc.toError(); @@ -1089,7 +1089,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) in += ciphertext_len; const auth_tag = frag[in..][0..P.AEAD.tag_length].*; const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8); - const operand: V = pad ++ @bitCast([8]u8, 
big(c.read_seq)); + const operand: V = pad ++ @as([8]u8, @bitCast(big(c.read_seq))); const nonce: [P.AEAD.nonce_length]u8 = @as(V, p.server_iv) ^ operand; const out_buf = vp.peek(); const cleartext_buf = if (ciphertext.len <= out_buf.len) @@ -1105,11 +1105,11 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) c.read_seq = try std.math.add(u64, c.read_seq, 1); - const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]); + const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1])); switch (inner_ct) { .alert => { - const level = @enumFromInt(tls.AlertLevel, cleartext[0]); - const desc = @enumFromInt(tls.AlertDescription, cleartext[1]); + const level = @as(tls.AlertLevel, @enumFromInt(cleartext[0])); + const desc = @as(tls.AlertDescription, @enumFromInt(cleartext[1])); if (desc == .close_notify) { c.received_close_notify = true; c.partial_ciphertext_end = c.partial_ciphertext_idx; @@ -1124,7 +1124,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) .handshake => { var ct_i: usize = 0; while (true) { - const handshake_type = @enumFromInt(tls.HandshakeType, cleartext[ct_i]); + const handshake_type = @as(tls.HandshakeType, @enumFromInt(cleartext[ct_i])); ct_i += 1; const handshake_len = mem.readIntBig(u24, cleartext[ct_i..][0..3]); ct_i += 3; @@ -1148,7 +1148,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) } c.read_seq = 0; - switch (@enumFromInt(tls.KeyUpdateRequest, handshake[0])) { + switch (@as(tls.KeyUpdateRequest, @enumFromInt(handshake[0]))) { .update_requested => { switch (c.application_cipher) { inline else => |*p| { @@ -1186,13 +1186,13 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec) c.partially_read_buffer[c.partial_ciphertext_idx..][0..msg.len], msg, ); - c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len); + 
c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(c.partial_ciphertext_idx + msg.len)); } else { const amt = vp.put(msg); if (amt < msg.len) { const rest = msg[amt..]; c.partial_cleartext_idx = 0; - c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len); + c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(rest.len)); @memcpy(c.partially_read_buffer[0..rest.len], rest); } } @@ -1220,12 +1220,12 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize { const saved_buf = frag[in..]; if (c.partial_ciphertext_idx > c.partial_cleartext_idx) { // There is cleartext at the beginning already which we need to preserve. - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + saved_buf.len)); @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..saved_buf.len], saved_buf); } else { c.partial_cleartext_idx = 0; c.partial_ciphertext_idx = 0; - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(saved_buf.len)); @memcpy(c.partially_read_buffer[0..saved_buf.len], saved_buf); } return out; @@ -1235,14 +1235,14 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize { fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usize { if (c.partial_ciphertext_idx > c.partial_cleartext_idx) { // There is cleartext at the beginning already which we need to preserve. 
- c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + first.len + frag1.len)); // TODO: eliminate this call to copyForwards std.mem.copyForwards(u8, c.partially_read_buffer[c.partial_ciphertext_idx..][0..first.len], first); @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..][0..frag1.len], frag1); } else { c.partial_cleartext_idx = 0; c.partial_ciphertext_idx = 0; - c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len); + c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(first.len + frag1.len)); // TODO: eliminate this call to copyForwards std.mem.copyForwards(u8, c.partially_read_buffer[0..first.len], first); @memcpy(c.partially_read_buffer[first.len..][0..frag1.len], frag1); diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig index 14a235e4189c..ab1b6eab6a7f 100644 --- a/lib/std/crypto/utils.zig +++ b/lib/std/crypto/utils.zig @@ -24,7 +24,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool { const s = @typeInfo(C).Int.bits; const Cu = std.meta.Int(.unsigned, s); const Cext = std.meta.Int(.unsigned, s + 1); - return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s)); + return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s)))); }, .Vector => |info| { const C = info.child; @@ -35,7 +35,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool { const s = @typeInfo(C).Int.bits; const Cu = std.meta.Int(.unsigned, s); const Cext = std.meta.Int(.unsigned, s + 1); - return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s)); + return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s)))); }, else => { @compileError("Only arrays and vectors can be compared"); @@ -60,14 +60,14 @@ pub fn 
timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E i -= 1; const x1 = a[i]; const x2 = b[i]; - gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq; - eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits); + gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq; + eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits)); } } else { for (a, 0..) |x1, i| { const x2 = b[i]; - gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq; - eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits); + gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq; + eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits)); } } if (gt != 0) { @@ -102,7 +102,7 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T, carry = ov1[1] | ov2[1]; } } - return @bitCast(bool, carry); + return @as(bool, @bitCast(carry)); } /// Subtract two integers serialized as arrays of the same size, in constant time. @@ -129,7 +129,7 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T, borrow = ov1[1] | ov2[1]; } } - return @bitCast(bool, borrow); + return @as(bool, @bitCast(borrow)); } /// Sets a slice to zeroes. 
diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig index 9bd98a72b710..0888edf10d69 100644 --- a/lib/std/cstr.zig +++ b/lib/std/cstr.zig @@ -89,12 +89,12 @@ pub const NullTerminated2DArray = struct { return NullTerminated2DArray{ .allocator = allocator, .byte_count = byte_count, - .ptr = @ptrCast(?[*:null]?[*:0]u8, buf.ptr), + .ptr = @as(?[*:null]?[*:0]u8, @ptrCast(buf.ptr)), }; } pub fn deinit(self: *NullTerminated2DArray) void { - const buf = @ptrCast([*]u8, self.ptr); + const buf = @as([*]u8, @ptrCast(self.ptr)); self.allocator.free(buf[0..self.byte_count]); } }; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index e0726d5444db..44f6ce136759 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -460,8 +460,8 @@ pub const StackIterator = struct { // We are unable to determine validity of memory for freestanding targets if (native_os == .freestanding) return true; - const aligned_address = address & ~@intCast(usize, (mem.page_size - 1)); - const aligned_memory = @ptrFromInt([*]align(mem.page_size) u8, aligned_address)[0..mem.page_size]; + const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1))); + const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size]; if (native_os != .windows) { if (native_os != .wasi) { @@ -511,7 +511,7 @@ pub const StackIterator = struct { if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp)) return null; - const new_fp = math.add(usize, @ptrFromInt(*const usize, fp).*, fp_bias) catch return null; + const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null; // Sanity check: the stack grows down thus all the parent frames must be // be at addresses that are greater (or equal) than the previous one. 
@@ -520,9 +520,9 @@ pub const StackIterator = struct { if (new_fp != 0 and new_fp < self.fp) return null; - const new_pc = @ptrFromInt( + const new_pc = @as( *const usize, - math.add(usize, fp, pc_offset) catch return null, + @ptrFromInt(math.add(usize, fp, pc_offset) catch return null), ).*; self.fp = new_fp; @@ -555,10 +555,10 @@ pub fn writeCurrentStackTrace( pub noinline fn walkStackWindows(addresses: []usize) usize { if (builtin.cpu.arch == .x86) { // RtlVirtualUnwind doesn't exist on x86 - return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @ptrCast(**anyopaque, addresses.ptr), null); + return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @as(**anyopaque, @ptrCast(addresses.ptr)), null); } - const tib = @ptrCast(*const windows.NT_TIB, &windows.teb().Reserved1); + const tib = @as(*const windows.NT_TIB, @ptrCast(&windows.teb().Reserved1)); var context: windows.CONTEXT = std.mem.zeroes(windows.CONTEXT); windows.ntdll.RtlCaptureContext(&context); @@ -584,7 +584,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize { ); } else { // leaf function - context.setIp(@ptrFromInt(*u64, current_regs.sp).*); + context.setIp(@as(*u64, @ptrFromInt(current_regs.sp)).*); context.setSp(current_regs.sp + @sizeOf(usize)); } @@ -734,7 +734,7 @@ fn printLineInfo( if (printLineFromFile(out_stream, li)) { if (li.column > 0) { // The caret already takes one char - const space_needed = @intCast(usize, li.column - 1); + const space_needed = @as(usize, @intCast(li.column - 1)); try out_stream.writeByteNTimes(' ', space_needed); try tty_config.setColor(out_stream, .green); @@ -883,7 +883,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8 pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo { nosuspend { const mapped_mem = try mapWholeFile(elf_file); - const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]); + const hdr = @as(*const elf.Ehdr, @ptrCast(&mapped_mem[0])); if (!mem.eql(u8, 
hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion; @@ -896,14 +896,13 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn const shoff = hdr.e_shoff; const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx); - const str_shdr = @ptrCast( - *const elf.Shdr, - @alignCast(@alignOf(elf.Shdr), &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow]), - ); + const str_shdr: *const elf.Shdr = @ptrCast(@alignCast( + &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow], + )); const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size]; - const shdrs = @ptrCast( + const shdrs = @as( [*]const elf.Shdr, - @alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]), + @ptrCast(@alignCast(&mapped_mem[shoff])), )[0..hdr.e_shnum]; var opt_debug_info: ?[]const u8 = null; @@ -982,10 +981,7 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo { const mapped_mem = try mapWholeFile(macho_file); - const hdr = @ptrCast( - *const macho.mach_header_64, - @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr), - ); + const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr)); if (hdr.magic != macho.MH_MAGIC_64) return error.InvalidDebugInfo; @@ -998,9 +994,9 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn else => {}, } else return error.MissingDebugInfo; - const syms = @ptrCast( + const syms = @as( [*]const macho.nlist_64, - @alignCast(@alignOf(macho.nlist_64), &mapped_mem[symtab.symoff]), + @ptrCast(@alignCast(&mapped_mem[symtab.symoff])), )[0..symtab.nsyms]; const strings = mapped_mem[symtab.stroff..][0 .. 
symtab.strsize - 1 :0]; @@ -1055,7 +1051,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn }, .fun_strx => { state = .fun_size; - last_sym.size = @intCast(u32, sym.n_value); + last_sym.size = @as(u32, @intCast(sym.n_value)); }, else => return error.InvalidDebugInfo, } @@ -1283,10 +1279,10 @@ pub const DebugInfo = struct { var it = macho.LoadCommandIterator{ .ncmds = header.ncmds, - .buffer = @alignCast(@alignOf(u64), @ptrFromInt( + .buffer = @alignCast(@as( [*]u8, - @intFromPtr(header) + @sizeOf(macho.mach_header_64), - ))[0..header.sizeofcmds], + @ptrFromInt(@intFromPtr(header) + @sizeOf(macho.mach_header_64)), + )[0..header.sizeofcmds]), }; while (it.next()) |cmd| switch (cmd.cmd()) { .SEGMENT_64 => { @@ -1332,7 +1328,7 @@ pub const DebugInfo = struct { return obj_di; } - const mapped_module = @ptrFromInt([*]const u8, module.base_address)[0..module.size]; + const mapped_module = @as([*]const u8, @ptrFromInt(module.base_address))[0..module.size]; const obj_di = try self.allocator.create(ModuleDebugInfo); errdefer self.allocator.destroy(obj_di); @@ -1465,10 +1461,7 @@ pub const ModuleDebugInfo = switch (native_os) { const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking }); const mapped_mem = try mapWholeFile(o_file); - const hdr = @ptrCast( - *const macho.mach_header_64, - @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr), - ); + const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr)); if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidDebugInfo; @@ -1487,21 +1480,18 @@ pub const ModuleDebugInfo = switch (native_os) { if (segcmd == null or symtabcmd == null) return error.MissingDebugInfo; // Parse symbols - const strtab = @ptrCast( + const strtab = @as( [*]const u8, - &mapped_mem[symtabcmd.?.stroff], + @ptrCast(&mapped_mem[symtabcmd.?.stroff]), )[0 .. 
symtabcmd.?.strsize - 1 :0]; - const symtab = @ptrCast( + const symtab = @as( [*]const macho.nlist_64, - @alignCast( - @alignOf(macho.nlist_64), - &mapped_mem[symtabcmd.?.symoff], - ), + @ptrCast(@alignCast(&mapped_mem[symtabcmd.?.symoff])), )[0..symtabcmd.?.nsyms]; // TODO handle tentative (common) symbols var addr_table = std.StringHashMap(u64).init(allocator); - try addr_table.ensureTotalCapacity(@intCast(u32, symtab.len)); + try addr_table.ensureTotalCapacity(@as(u32, @intCast(symtab.len))); for (symtab) |sym| { if (sym.n_strx == 0) continue; if (sym.undf() or sym.tentative() or sym.abs()) continue; @@ -1943,49 +1933,49 @@ fn dumpSegfaultInfoPosix(sig: i32, addr: usize, ctx_ptr: ?*const anyopaque) void switch (native_arch) { .x86 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]); - const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP])); + const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP])); dumpStackTraceFromBase(bp, ip); }, .x86_64 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]), - .freebsd => @intCast(usize, ctx.mcontext.rip), - .openbsd => @intCast(usize, ctx.sc_rip), - .macos => @intCast(usize, ctx.mcontext.ss.rip), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.rip)), + .openbsd => @as(usize, @intCast(ctx.sc_rip)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)), else => unreachable, }; const bp = switch (native_os) { - .linux, .netbsd, .solaris => @intCast(usize, 
ctx.mcontext.gregs[os.REG.RBP]), - .openbsd => @intCast(usize, ctx.sc_rbp), - .freebsd => @intCast(usize, ctx.mcontext.rbp), - .macos => @intCast(usize, ctx.mcontext.ss.rbp), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])), + .openbsd => @as(usize, @intCast(ctx.sc_rbp)), + .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)), else => unreachable, }; dumpStackTraceFromBase(bp, ip); }, .arm => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.arm_pc); - const bp = @intCast(usize, ctx.mcontext.arm_fp); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.arm_pc)); + const bp = @as(usize, @intCast(ctx.mcontext.arm_fp)); dumpStackTraceFromBase(bp, ip); }, .aarch64 => { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.pc), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr), - else => @intCast(usize, ctx.mcontext.pc), + .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)), + else => @as(usize, @intCast(ctx.mcontext.pc)), }; // x29 is the ABI-designated frame pointer const bp = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.fp), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]), - else => @intCast(usize, ctx.mcontext.regs[29]), + .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])), + .freebsd => @as(usize, 
@intCast(ctx.mcontext.gpregs.x[os.REG.FP])), + else => @as(usize, @intCast(ctx.mcontext.regs[29])), }; dumpStackTraceFromBase(bp, ip); }, diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 4de08b25d7c6..aa1ac6959fd4 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -462,7 +462,7 @@ const LineNumberProgram = struct { }); return debug.LineInfo{ - .line = if (self.prev_line >= 0) @intCast(u64, self.prev_line) else 0, + .line = if (self.prev_line >= 0) @as(u64, @intCast(self.prev_line)) else 0, .column = self.prev_column, .file_name = file_name, }; @@ -533,7 +533,7 @@ fn parseFormValueConstant(in_stream: anytype, signed: bool, endian: std.builtin. -1 => blk: { if (signed) { const x = try nosuspend leb.readILEB128(i64, in_stream); - break :blk @bitCast(u64, x); + break :blk @as(u64, @bitCast(x)); } else { const x = try nosuspend leb.readULEB128(u64, in_stream); break :blk x; @@ -939,12 +939,12 @@ pub const DwarfInfo = struct { .Const => |c| try c.asUnsignedLe(), .RangeListOffset => |idx| off: { if (compile_unit.is_64) { - const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx); + const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 8 * idx)); if (offset_loc + 8 > debug_ranges.len) return badDwarf(); const offset = mem.readInt(u64, debug_ranges[offset_loc..][0..8], di.endian); break :off compile_unit.rnglists_base + offset; } else { - const offset_loc = @intCast(usize, compile_unit.rnglists_base + 4 * idx); + const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 4 * idx)); if (offset_loc + 4 > debug_ranges.len) return badDwarf(); const offset = mem.readInt(u32, debug_ranges[offset_loc..][0..4], di.endian); break :off compile_unit.rnglists_base + offset; @@ -1134,7 +1134,7 @@ pub const DwarfInfo = struct { ), }; if (attr.form_id == FORM.implicit_const) { - result.attrs.items[i].value.Const.payload = @bitCast(u64, attr.payload); + result.attrs.items[i].value.Const.payload = @as(u64, @bitCast(attr.payload)); } } 
return result; @@ -1438,7 +1438,7 @@ pub const DwarfInfo = struct { const addr_size = debug_addr[compile_unit.addr_base - 2]; const seg_size = debug_addr[compile_unit.addr_base - 1]; - const byte_offset = @intCast(usize, compile_unit.addr_base + (addr_size + seg_size) * index); + const byte_offset = @as(usize, @intCast(compile_unit.addr_base + (addr_size + seg_size) * index)); if (byte_offset + addr_size > debug_addr.len) return badDwarf(); return switch (addr_size) { 1 => debug_addr[byte_offset], diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 38c5de9cad60..3342ac3f6d91 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -71,18 +71,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator { while (_DYNAMIC[i].d_tag != elf.DT_NULL) : (i += 1) { switch (_DYNAMIC[i].d_tag) { elf.DT_DEBUG => { - const ptr = @ptrFromInt(?*RDebug, _DYNAMIC[i].d_val); + const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |r_debug| { if (r_debug.r_version != 1) return error.InvalidExe; break :init r_debug.r_map; } }, elf.DT_PLTGOT => { - const ptr = @ptrFromInt(?[*]usize, _DYNAMIC[i].d_val); + const ptr = @as(?[*]usize, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |got_table| { // The address to the link_map structure is stored in // the second slot - break :init @ptrFromInt(?*LinkMap, got_table[1]); + break :init @as(?*LinkMap, @ptrFromInt(got_table[1])); } }, else => {}, @@ -132,7 +132,7 @@ pub const ElfDynLib = struct { ); defer os.munmap(file_bytes); - const eh = @ptrCast(*elf.Ehdr, file_bytes.ptr); + const eh = @as(*elf.Ehdr, @ptrCast(file_bytes.ptr)); if (!mem.eql(u8, eh.e_ident[0..4], elf.MAGIC)) return error.NotElfFile; if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary; @@ -149,10 +149,10 @@ pub const ElfDynLib = struct { i += 1; ph_addr += eh.e_phentsize; }) { - const ph = @ptrFromInt(*elf.Phdr, ph_addr); + const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => 
virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), - elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, elf_addr + ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)), else => {}, } } @@ -180,7 +180,7 @@ pub const ElfDynLib = struct { i += 1; ph_addr += eh.e_phentsize; }) { - const ph = @ptrFromInt(*elf.Phdr, ph_addr); + const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => { // The VirtAddr may not be page-aligned; in such case there will be @@ -188,7 +188,7 @@ pub const ElfDynLib = struct { const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); - const ptr = @ptrFromInt([*]align(mem.page_size) u8, aligned_addr); + const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr)); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { // If it does not need write access, it can be mapped from the fd. 
@@ -228,11 +228,11 @@ pub const ElfDynLib = struct { while (dynv[i] != 0) : (i += 2) { const p = base + dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @ptrFromInt([*:0]u8, p), - elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]os.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p), - elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p), + elf.DT_STRTAB => maybe_strings = @as([*:0]u8, @ptrFromInt(p)), + elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)), + elf.DT_HASH => maybe_hashtab = @as([*]os.Elf_Symndx, @ptrFromInt(p)), + elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)), + elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)), else => {}, } } @@ -261,7 +261,7 @@ pub const ElfDynLib = struct { pub fn lookup(self: *ElfDynLib, comptime T: type, name: [:0]const u8) ?T { if (self.lookupAddress("", name)) |symbol| { - return @ptrFromInt(T, symbol); + return @as(T, @ptrFromInt(symbol)); } else { return null; } @@ -276,8 +276,8 @@ pub const ElfDynLib = struct { var i: usize = 0; while (i < self.hashtab[1]) : (i += 1) { - if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue; - if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info & 0xf)) & OK_TYPES)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info >> 4)) & OK_BINDS)) continue; if (0 == self.syms[i].st_shndx) continue; if (!mem.eql(u8, name, mem.sliceTo(self.strings + self.syms[i].st_name, 0))) continue; if (maybe_versym) |versym| { @@ -301,15 +301,15 @@ pub const ElfDynLib = struct { fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*:0]u8) bool { var def = def_arg; - const vsym = @bitCast(u32, vsym_arg) & 0x7fff; + const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff; while (true) { if (0 == 
(def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym) break; if (def.vd_next == 0) return false; - def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next); + def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next)); } - const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux); + const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux)); return mem.eql(u8, vername, mem.sliceTo(strings + aux.vda_name, 0)); } @@ -347,7 +347,7 @@ pub const WindowsDynLib = struct { pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T { if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| { - return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), addr)); + return @as(T, @ptrCast(@alignCast(addr))); } else { return null; } @@ -381,7 +381,7 @@ pub const DlDynlib = struct { // dlsym (and other dl-functions) secretly take shadow parameter - return address on stack // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826 if (@call(.never_tail, system.dlsym, .{ self.handle, name.ptr })) |symbol| { - return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), symbol)); + return @as(T, @ptrCast(@alignCast(symbol))); } else { return null; } diff --git a/lib/std/elf.zig b/lib/std/elf.zig index 9a71f73e05b5..d464d7d12be9 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -434,8 +434,8 @@ pub const Header = struct { } pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header { - const hdr32 = @ptrCast(*const Elf32_Ehdr, hdr_buf); - const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf); + const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf)); + const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], MAGIC)) return error.InvalidElfMagic; if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion; @@ -454,7 +454,7 @@ pub const Header = struct { const machine = if (need_bswap) blk: { const value = 
@intFromEnum(hdr32.e_machine); - break :blk @enumFromInt(EM, @byteSwap(value)); + break :blk @as(EM, @enumFromInt(@byteSwap(value))); } else hdr32.e_machine; return @as(Header, .{ @@ -725,10 +725,10 @@ pub const Elf32_Sym = extern struct { st_shndx: Elf32_Section, pub inline fn st_type(self: @This()) u4 { - return @truncate(u4, self.st_info); + return @as(u4, @truncate(self.st_info)); } pub inline fn st_bind(self: @This()) u4 { - return @truncate(u4, self.st_info >> 4); + return @as(u4, @truncate(self.st_info >> 4)); } }; pub const Elf64_Sym = extern struct { @@ -740,10 +740,10 @@ pub const Elf64_Sym = extern struct { st_size: Elf64_Xword, pub inline fn st_type(self: @This()) u4 { - return @truncate(u4, self.st_info); + return @as(u4, @truncate(self.st_info)); } pub inline fn st_bind(self: @This()) u4 { - return @truncate(u4, self.st_info >> 4); + return @as(u4, @truncate(self.st_info >> 4)); } }; pub const Elf32_Syminfo = extern struct { @@ -759,10 +759,10 @@ pub const Elf32_Rel = extern struct { r_info: Elf32_Word, pub inline fn r_sym(self: @This()) u24 { - return @truncate(u24, self.r_info >> 8); + return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { - return @truncate(u8, self.r_info); + return @as(u8, @truncate(self.r_info)); } }; pub const Elf64_Rel = extern struct { @@ -770,10 +770,10 @@ pub const Elf64_Rel = extern struct { r_info: Elf64_Xword, pub inline fn r_sym(self: @This()) u32 { - return @truncate(u32, self.r_info >> 32); + return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { - return @truncate(u32, self.r_info); + return @as(u32, @truncate(self.r_info)); } }; pub const Elf32_Rela = extern struct { @@ -782,10 +782,10 @@ pub const Elf32_Rela = extern struct { r_addend: Elf32_Sword, pub inline fn r_sym(self: @This()) u24 { - return @truncate(u24, self.r_info >> 8); + return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { - return @truncate(u8, 
self.r_info); + return @as(u8, @truncate(self.r_info)); } }; pub const Elf64_Rela = extern struct { @@ -794,10 +794,10 @@ pub const Elf64_Rela = extern struct { r_addend: Elf64_Sxword, pub inline fn r_sym(self: @This()) u32 { - return @truncate(u32, self.r_info >> 32); + return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { - return @truncate(u32, self.r_info); + return @as(u32, @truncate(self.r_info)); } }; pub const Elf32_Dyn = extern struct { diff --git a/lib/std/enums.zig b/lib/std/enums.zig index a5ceebc9b1db..9931b1d7c104 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -16,7 +16,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def fields = fields ++ &[_]StructField{.{ .name = field.name, .type = Data, - .default_value = if (field_default) |d| @ptrCast(?*const anyopaque, &d) else null, + .default_value = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null, .is_comptime = false, .alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0, }}; @@ -61,7 +61,7 @@ test tagName { const E = enum(u8) { a, b, _ }; try testing.expect(tagName(E, .a) != null); try testing.expectEqualStrings("a", tagName(E, .a).?); - try testing.expect(tagName(E, @enumFromInt(E, 42)) == null); + try testing.expect(tagName(E, @as(E, @enumFromInt(42))) == null); } /// Determines the length of a direct-mapped enum array, indexed by @@ -156,7 +156,7 @@ pub fn directEnumArrayDefault( var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined; inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| { const enum_value = @field(E, f.name); - const index = @intCast(usize, @intFromEnum(enum_value)); + const index = @as(usize, @intCast(@intFromEnum(enum_value))); result[index] = @field(init_values, f.name); } return result; @@ -341,7 +341,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { var self = initWithCount(0); inline for 
(@typeInfo(E).Enum.fields) |field| { const c = @field(init_counts, field.name); - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.counts.set(key, c); } return self; @@ -412,7 +412,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// asserts operation will not overflow any key. pub fn addSetAssertSafe(self: *Self, other: Self) void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.addAssertSafe(key, other.getCount(key)); } } @@ -420,7 +420,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// Increases the all key counts by given multiset. pub fn addSet(self: *Self, other: Self) error{Overflow}!void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); try self.add(key, other.getCount(key)); } } @@ -430,7 +430,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// then that key will have a key count of zero. pub fn removeSet(self: *Self, other: Self) void { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); self.remove(key, other.getCount(key)); } } @@ -439,7 +439,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// given multiset. pub fn eql(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) != other.getCount(key)) { return false; } @@ -451,7 +451,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// equal to the given multiset. 
pub fn subsetOf(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) > other.getCount(key)) { return false; } @@ -463,7 +463,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { /// equal to the given multiset. pub fn supersetOf(self: Self, other: Self) bool { inline for (@typeInfo(E).Enum.fields) |field| { - const key = @enumFromInt(E, field.value); + const key = @as(E, @enumFromInt(field.value)); if (self.getCount(key) < other.getCount(key)) { return false; } @@ -1281,10 +1281,10 @@ test "std.enums.ensureIndexer" { pub const Key = u32; pub const count: usize = 8; pub fn indexOf(k: Key) usize { - return @intCast(usize, k); + return @as(usize, @intCast(k)); } pub fn keyForIndex(index: usize) Key { - return @intCast(Key, index); + return @as(Key, @intCast(index)); } }); } @@ -1323,14 +1323,14 @@ pub fn EnumIndexer(comptime E: type) type { pub const Key = E; pub const count = fields_len; pub fn indexOf(e: E) usize { - return @intCast(usize, @intFromEnum(e) - min); + return @as(usize, @intCast(@intFromEnum(e) - min)); } pub fn keyForIndex(i: usize) E { // TODO fix addition semantics. This calculation // gives up some safety to avoid artificially limiting // the range of signed enum values to max_isize. 
- const enum_value = if (min < 0) @bitCast(isize, i) +% min else i + min; - return @enumFromInt(E, @intCast(std.meta.Tag(E), enum_value)); + const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min; + return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value)))); } }; } diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig index 9da3943d5d64..8608298c298f 100644 --- a/lib/std/event/lock.zig +++ b/lib/std/event/lock.zig @@ -55,7 +55,7 @@ pub const Lock = struct { const head = switch (self.head) { UNLOCKED => unreachable, LOCKED => null, - else => @ptrFromInt(*Waiter, self.head), + else => @as(*Waiter, @ptrFromInt(self.head)), }; if (head) |h| { @@ -102,7 +102,7 @@ pub const Lock = struct { break :blk null; }, else => { - const waiter = @ptrFromInt(*Waiter, self.lock.head); + const waiter = @as(*Waiter, @ptrFromInt(self.lock.head)); self.lock.head = if (waiter.next == null) LOCKED else @intFromPtr(waiter.next); if (waiter.next) |next| next.tail = waiter.tail; @@ -130,7 +130,7 @@ test "std.event.Lock" { var lock = Lock{}; testLock(&lock); - const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; + const expected_result = [1]i32{3 * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len; try testing.expectEqualSlices(i32, &expected_result, &shared_test_data); } fn testLock(lock: *Lock) void { diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index 7eec26a2b175..b5021a5378e4 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -556,7 +556,7 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -568,7 
+568,7 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -580,8 +580,8 @@ pub const Loop = struct { self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT | os.linux.EPOLL.IN); }, .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => { - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT); - self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT); + self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT); }, else => @compileError("Unsupported OS"), } @@ -1415,7 +1415,7 @@ pub const Loop = struct { var events: [1]os.linux.epoll_event = undefined; const count = os.epoll_wait(self.os_data.epollfd, events[0..], -1); for (events[0..count]) |ev| { - const resume_node = @ptrFromInt(*ResumeNode, ev.data.ptr); + const resume_node = @as(*ResumeNode, @ptrFromInt(ev.data.ptr)); const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { @@ -1439,7 +1439,7 @@ pub const Loop = struct { const empty_kevs = &[0]os.Kevent{}; const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable; for (eventlist[0..count]) |ev| { - const resume_node = @ptrFromInt(*ResumeNode, ev.udata); + const resume_node = @as(*ResumeNode, @ptrFromInt(ev.udata)); const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig index 
c19330d5a922..47ddf74fd5dc 100644 --- a/lib/std/event/rwlock.zig +++ b/lib/std/event/rwlock.zig @@ -223,7 +223,7 @@ test "std.event.RwLock" { _ = testLock(std.heap.page_allocator, &lock); - const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; + const expected_result = [1]i32{shared_it_count * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len; try testing.expectEqualSlices(i32, expected_result, shared_test_data); } fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void { @@ -244,12 +244,12 @@ fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void { } for (write_nodes) |*write_node| { - const casted = @ptrCast(*const @Frame(writeRunner), write_node.data); + const casted = @as(*const @Frame(writeRunner), @ptrCast(write_node.data)); await casted; allocator.destroy(casted); } for (read_nodes) |*read_node| { - const casted = @ptrCast(*const @Frame(readRunner), read_node.data); + const casted = @as(*const @Frame(readRunner), @ptrCast(read_node.data)); await casted; allocator.destroy(casted); } @@ -287,6 +287,6 @@ fn readRunner(lock: *RwLock) callconv(.Async) void { defer handle.release(); try testing.expect(shared_test_index == 0); - try testing.expect(shared_test_data[i] == @intCast(i32, shared_count)); + try testing.expect(shared_test_data[i] == @as(i32, @intCast(shared_count))); } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index d983aba369b4..7af21c86df21 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -396,7 +396,7 @@ pub const ArgState = struct { } // Mark this argument as used - self.used_args |= @as(ArgSetType, 1) << @intCast(u5, next_index); + self.used_args |= @as(ArgSetType, 1) << @as(u5, @intCast(next_index)); return next_index; } }; @@ -1056,7 +1056,7 @@ pub fn formatFloatScientific( options: FormatOptions, writer: anytype, ) !void { - var x = @floatCast(f64, value); + var x = @as(f64, @floatCast(value)); // Errol doesn't handle these special 
cases. if (math.signbit(x)) { @@ -1167,9 +1167,9 @@ pub fn formatFloatHexadecimal( const exponent_mask = (1 << exponent_bits) - 1; const exponent_bias = (1 << (exponent_bits - 1)) - 1; - const as_bits = @bitCast(TU, value); + const as_bits = @as(TU, @bitCast(value)); var mantissa = as_bits & mantissa_mask; - var exponent: i32 = @truncate(u16, (as_bits >> mantissa_bits) & exponent_mask); + var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask)); const is_denormal = exponent == 0 and mantissa != 0; const is_zero = exponent == 0 and mantissa == 0; @@ -1218,7 +1218,7 @@ pub fn formatFloatHexadecimal( // Drop the excess bits. mantissa >>= 2; // Restore the alignment. - mantissa <<= @intCast(math.Log2Int(TU), (mantissa_digits - precision) * 4); + mantissa <<= @as(math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4)); const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0; // Prefer a normalized result in case of overflow. @@ -1296,7 +1296,7 @@ pub fn formatFloatDecimal( errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal); // exp < 0 means the leading is always 0 as errol result is normalized. - var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; + var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); @@ -1325,7 +1325,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. 
if (float_decimal.exp <= 0) { - const zero_digit_count = @intCast(usize, -float_decimal.exp); + const zero_digit_count = @as(usize, @intCast(-float_decimal.exp)); const zeros_to_print = @min(zero_digit_count, precision); var i: usize = 0; @@ -1354,7 +1354,7 @@ pub fn formatFloatDecimal( } } else { // exp < 0 means the leading is always 0 as errol result is normalized. - var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; + var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); @@ -1380,7 +1380,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp < 0) { - const zero_digit_count = @intCast(usize, -float_decimal.exp); + const zero_digit_count = @as(usize, @intCast(-float_decimal.exp)); var i: usize = 0; while (i < zero_digit_count) : (i += 1) { @@ -1423,21 +1423,21 @@ pub fn formatInt( if (base == 10) { while (a >= 100) : (a = @divTrunc(a, 100)) { index -= 2; - buf[index..][0..2].* = digits2(@intCast(usize, a % 100)); + buf[index..][0..2].* = digits2(@as(usize, @intCast(a % 100))); } if (a < 10) { index -= 1; - buf[index] = '0' + @intCast(u8, a); + buf[index] = '0' + @as(u8, @intCast(a)); } else { index -= 2; - buf[index..][0..2].* = digits2(@intCast(usize, a)); + buf[index..][0..2].* = digits2(@as(usize, @intCast(a))); } } else { while (true) { const digit = a % base; index -= 1; - buf[index] = digitToChar(@intCast(u8, digit), case); + buf[index] = digitToChar(@as(u8, @intCast(digit)), case); a /= base; if (a == 0) break; } @@ -1595,10 +1595,10 @@ test "fmtDuration" { fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { if (ns < 0) { - const data = FormatDurationData{ .ns = 
@intCast(u64, -ns), .negative = true }; + const data = FormatDurationData{ .ns = @as(u64, @intCast(-ns)), .negative = true }; try formatDuration(data, fmt, options, writer); } else { - const data = FormatDurationData{ .ns = @intCast(u64, ns) }; + const data = FormatDurationData{ .ns = @as(u64, @intCast(ns)) }; try formatDuration(data, fmt, options, writer); } } @@ -1846,7 +1846,7 @@ fn parseWithSign( // The first digit of a negative number. // Consider parsing "-4" as an i3. // This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract. - x = math.cast(T, -@intCast(i8, digit)) orelse return error.Overflow; + x = math.cast(T, -@as(i8, @intCast(digit))) orelse return error.Overflow; continue; } x = try add(T, x, math.cast(T, digit) orelse return error.Overflow); @@ -2099,7 +2099,7 @@ test "optional" { try expectFmt("optional: null\n", "optional: {?}\n", .{value}); } { - const value = @ptrFromInt(?*i32, 0xf000d000); + const value = @as(?*i32, @ptrFromInt(0xf000d000)); try expectFmt("optional: *i32@f000d000\n", "optional: {*}\n", .{value}); } } @@ -2218,7 +2218,7 @@ test "slice" { } { var runtime_zero: usize = 0; - const value = @ptrFromInt([*]align(1) const []const u8, 0xdeadbeef)[runtime_zero..runtime_zero]; + const value = @as([*]align(1) const []const u8, @ptrFromInt(0xdeadbeef))[runtime_zero..runtime_zero]; try expectFmt("slice: []const u8@deadbeef\n", "slice: {*}\n", .{value}); } { @@ -2248,17 +2248,17 @@ test "escape non-printable" { test "pointer" { { - const value = @ptrFromInt(*align(1) i32, 0xdeadbeef); + const value = @as(*align(1) i32, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: i32@deadbeef\n", "pointer: {}\n", .{value}); try expectFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", .{value}); } const FnPtr = *align(1) const fn () void; { - const value = @ptrFromInt(FnPtr, 0xdeadbeef); + const value = @as(FnPtr, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value}); } { - 
const value = @ptrFromInt(FnPtr, 0xdeadbeef); + const value = @as(FnPtr, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value}); } } @@ -2267,12 +2267,12 @@ test "cstr" { try expectFmt( "cstr: Test C\n", "cstr: {s}\n", - .{@ptrCast([*c]const u8, "Test C")}, + .{@as([*c]const u8, @ptrCast("Test C"))}, ); try expectFmt( "cstr: Test C\n", "cstr: {s:10}\n", - .{@ptrCast([*c]const u8, "Test C")}, + .{@as([*c]const u8, @ptrCast("Test C"))}, ); } @@ -2360,11 +2360,11 @@ test "non-exhaustive enum" { }; try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {}\n", .{Enum.One}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {}\n", .{Enum.Two}); - try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@enumFromInt(Enum, 0x1234)}); + try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@as(Enum, @enumFromInt(0x1234))}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {x}\n", .{Enum.One}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {x}\n", .{Enum.Two}); try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {X}\n", .{Enum.Two}); - try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@enumFromInt(Enum, 0x1234)}); + try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@as(Enum, @enumFromInt(0x1234))}); } test "float.scientific" { @@ -2376,11 +2376,11 @@ test "float.scientific" { test "float.scientific.precision" { try expectFmt("f64: 1.40971e-42", "f64: {e:.5}", .{@as(f64, 1.409706e-42)}); - try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 814313563)))}); - try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1006632960)))}); + try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 814313563))))}); + try expectFmt("f64: 
7.81250e-03", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1006632960))))}); // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05. // In fact, libc doesn't round a lot of 5 cases up when one past the precision point. - try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1203982400)))}); + try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1203982400))))}); } test "float.special" { @@ -2472,22 +2472,22 @@ test "float.decimal" { } test "float.libc.sanity" { - try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))}); - try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))}); - try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))}); - try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1065353133)))}); - try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1092616192)))}); + try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 916964781))))}); + try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 925353389))))}); + try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1036831278))))}); + try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1065353133))))}); + try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1092616192))))}); // libc differences // // This is 0.015625 exactly according to gdb. We thus round down, // however glibc rounds up for some reason. This occurs for all // floats of the form x.yyyy25 on a precision point. - try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1015021568)))}); + try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1015021568))))}); // errol3 rounds to ... 
630 but libc rounds to ...632. Grisu3 // also rounds to 630 so I'm inclined to believe libc is not // optimal here. - try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1518338049)))}); + try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1518338049))))}); } test "custom" { diff --git a/lib/std/fmt/errol.zig b/lib/std/fmt/errol.zig index b438733589d0..af686d6448d7 100644 --- a/lib/std/fmt/errol.zig +++ b/lib/std/fmt/errol.zig @@ -29,11 +29,11 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro switch (mode) { RoundMode.Decimal => { if (float_decimal.exp >= 0) { - round_digit = precision + @intCast(usize, float_decimal.exp); + round_digit = precision + @as(usize, @intCast(float_decimal.exp)); } else { // if a small negative exp, then adjust we need to offset by the number // of leading zeros that will occur. - const min_exp_required = @intCast(usize, -float_decimal.exp); + const min_exp_required = @as(usize, @intCast(-float_decimal.exp)); if (precision > min_exp_required) { round_digit = precision - min_exp_required; } @@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro float_decimal.exp += 1; // Re-size the buffer to use the reserved leading byte. - const one_before = @ptrFromInt([*]u8, @intFromPtr(&float_decimal.digits[0]) - 1); + const one_before = @as([*]u8, @ptrFromInt(@intFromPtr(&float_decimal.digits[0]) - 1)); float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1]; float_decimal.digits[0] = '1'; return; @@ -80,7 +80,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro /// Corrected Errol3 double to ASCII conversion. 
pub fn errol3(value: f64, buffer: []u8) FloatDecimal { - const bits = @bitCast(u64, value); + const bits = @as(u64, @bitCast(value)); const i = tableLowerBound(bits); if (i < enum3.len and enum3[i] == bits) { const data = enum3_data[i]; @@ -113,16 +113,16 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal { // normalize the midpoint const e = math.frexp(val).exponent; - var exp = @intFromFloat(i16, @floor(307 + @floatFromInt(f64, e) * 0.30103)); + var exp = @as(i16, @intFromFloat(@floor(307 + @as(f64, @floatFromInt(e)) * 0.30103))); if (exp < 20) { exp = 20; - } else if (@intCast(usize, exp) >= lookup_table.len) { - exp = @intCast(i16, lookup_table.len - 1); + } else if (@as(usize, @intCast(exp)) >= lookup_table.len) { + exp = @as(i16, @intCast(lookup_table.len - 1)); } - var mid = lookup_table[@intCast(usize, exp)]; + var mid = lookup_table[@as(usize, @intCast(exp))]; mid = hpProd(mid, val); - const lten = lookup_table[@intCast(usize, exp)].val; + const lten = lookup_table[@as(usize, @intCast(exp))].val; exp -= 307; @@ -171,25 +171,25 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal { var buf_index: usize = 0; const bound = buffer.len - 1; while (buf_index < bound) { - var hdig = @intFromFloat(u8, @floor(high.val)); - if ((high.val == @floatFromInt(f64, hdig)) and (high.off < 0)) hdig -= 1; + var hdig = @as(u8, @intFromFloat(@floor(high.val))); + if ((high.val == @as(f64, @floatFromInt(hdig))) and (high.off < 0)) hdig -= 1; - var ldig = @intFromFloat(u8, @floor(low.val)); - if ((low.val == @floatFromInt(f64, ldig)) and (low.off < 0)) ldig -= 1; + var ldig = @as(u8, @intFromFloat(@floor(low.val))); + if ((low.val == @as(f64, @floatFromInt(ldig))) and (low.off < 0)) ldig -= 1; if (ldig != hdig) break; buffer[buf_index] = hdig + '0'; buf_index += 1; - high.val -= @floatFromInt(f64, hdig); - low.val -= @floatFromInt(f64, ldig); + high.val -= @as(f64, @floatFromInt(hdig)); + low.val -= @as(f64, @floatFromInt(ldig)); hpMul10(&high); hpMul10(&low); } const tmp = 
(high.val + low.val) / 2.0; - var mdig = @intFromFloat(u8, @floor(tmp + 0.5)); - if ((@floatFromInt(f64, mdig) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1; + var mdig = @as(u8, @intFromFloat(@floor(tmp + 0.5))); + if ((@as(f64, @floatFromInt(mdig)) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1; buffer[buf_index] = mdig + '0'; buf_index += 1; @@ -248,9 +248,9 @@ fn split(val: f64, hi: *f64, lo: *f64) void { } fn gethi(in: f64) f64 { - const bits = @bitCast(u64, in); + const bits = @as(u64, @bitCast(in)); const new_bits = bits & 0xFFFFFFFFF8000000; - return @bitCast(f64, new_bits); + return @as(f64, @bitCast(new_bits)); } /// Normalize the number by factoring in the error. @@ -303,21 +303,21 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { assert((val > 9.007199254740992e15) and val < (3.40282366920938e38)); - var mid = @intFromFloat(u128, val); + var mid = @as(u128, @intFromFloat(val)); var low: u128 = mid - fpeint((fpnext(val) - val) / 2.0); var high: u128 = mid + fpeint((val - fpprev(val)) / 2.0); - if (@bitCast(u64, val) & 0x1 != 0) { + if (@as(u64, @bitCast(val)) & 0x1 != 0) { high -= 1; } else { low -= 1; } - var l64 = @intCast(u64, low % pow19); - const lf = @intCast(u64, (low / pow19) % pow19); + var l64 = @as(u64, @intCast(low % pow19)); + const lf = @as(u64, @intCast((low / pow19) % pow19)); - var h64 = @intCast(u64, high % pow19); - const hf = @intCast(u64, (high / pow19) % pow19); + var h64 = @as(u64, @intCast(high % pow19)); + const hf = @as(u64, @intCast((high / pow19) % pow19)); if (lf != hf) { l64 = lf; @@ -333,7 +333,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { x *= 10; } } - const m64 = @truncate(u64, @divTrunc(mid, x)); + const m64 = @as(u64, @truncate(@divTrunc(mid, x))); if (lf != hf) mi += 19; @@ -349,7 +349,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal { return FloatDecimal{ .digits = buffer[0..buf_index], - .exp = @intCast(i32, buf_index) + mi, + .exp = @as(i32, @intCast(buf_index)) + mi, }; } @@ -360,33 +360,33 @@ 
fn errolInt(val: f64, buffer: []u8) FloatDecimal { fn errolFixed(val: f64, buffer: []u8) FloatDecimal { assert((val >= 16.0) and (val < 9.007199254740992e15)); - const u = @intFromFloat(u64, val); - const n = @floatFromInt(f64, u); + const u = @as(u64, @intFromFloat(val)); + const n = @as(f64, @floatFromInt(u)); var mid = val - n; var lo = ((fpprev(val) - n) + mid) / 2.0; var hi = ((fpnext(val) - n) + mid) / 2.0; var buf_index = u64toa(u, buffer); - var exp = @intCast(i32, buf_index); + var exp = @as(i32, @intCast(buf_index)); var j = buf_index; buffer[j] = 0; if (mid != 0.0) { while (mid != 0.0) { lo *= 10.0; - const ldig = @intFromFloat(i32, lo); - lo -= @floatFromInt(f64, ldig); + const ldig = @as(i32, @intFromFloat(lo)); + lo -= @as(f64, @floatFromInt(ldig)); mid *= 10.0; - const mdig = @intFromFloat(i32, mid); - mid -= @floatFromInt(f64, mdig); + const mdig = @as(i32, @intFromFloat(mid)); + mid -= @as(f64, @floatFromInt(mdig)); hi *= 10.0; - const hdig = @intFromFloat(i32, hi); - hi -= @floatFromInt(f64, hdig); + const hdig = @as(i32, @intFromFloat(hi)); + hi -= @as(f64, @floatFromInt(hdig)); - buffer[j] = @intCast(u8, mdig + '0'); + buffer[j] = @as(u8, @intCast(mdig + '0')); j += 1; if (hdig != ldig or j > 50) break; @@ -413,11 +413,11 @@ fn errolFixed(val: f64, buffer: []u8) FloatDecimal { } fn fpnext(val: f64) f64 { - return @bitCast(f64, @bitCast(u64, val) +% 1); + return @as(f64, @bitCast(@as(u64, @bitCast(val)) +% 1)); } fn fpprev(val: f64) f64 { - return @bitCast(f64, @bitCast(u64, val) -% 1); + return @as(f64, @bitCast(@as(u64, @bitCast(val)) -% 1)); } pub const c_digits_lut = [_]u8{ @@ -453,7 +453,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize { var buf_index: usize = 0; if (value < kTen8) { - const v = @intCast(u32, value); + const v = @as(u32, @intCast(value)); if (v < 10000) { const d1: u32 = (v / 100) << 1; const d2: u32 = (v % 100) << 1; @@ -508,8 +508,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buf_index += 1; } } else if (value 
< kTen16) { - const v0: u32 = @intCast(u32, value / kTen8); - const v1: u32 = @intCast(u32, value % kTen8); + const v0: u32 = @as(u32, @intCast(value / kTen8)); + const v1: u32 = @as(u32, @intCast(value % kTen8)); const b0: u32 = v0 / 10000; const c0: u32 = v0 % 10000; @@ -579,11 +579,11 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buffer[buf_index] = c_digits_lut[d8 + 1]; buf_index += 1; } else { - const a = @intCast(u32, value / kTen16); // 1 to 1844 + const a = @as(u32, @intCast(value / kTen16)); // 1 to 1844 value %= kTen16; if (a < 10) { - buffer[buf_index] = '0' + @intCast(u8, a); + buffer[buf_index] = '0' + @as(u8, @intCast(a)); buf_index += 1; } else if (a < 100) { const i: u32 = a << 1; @@ -592,7 +592,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buffer[buf_index] = c_digits_lut[i + 1]; buf_index += 1; } else if (a < 1000) { - buffer[buf_index] = '0' + @intCast(u8, a / 100); + buffer[buf_index] = '0' + @as(u8, @intCast(a / 100)); buf_index += 1; const i: u32 = (a % 100) << 1; @@ -613,8 +613,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize { buf_index += 1; } - const v0 = @intCast(u32, value / kTen8); - const v1 = @intCast(u32, value % kTen8); + const v0 = @as(u32, @intCast(value / kTen8)); + const v1 = @as(u32, @intCast(value % kTen8)); const b0: u32 = v0 / 10000; const c0: u32 = v0 % 10000; @@ -672,10 +672,10 @@ fn u64toa(value_param: u64, buffer: []u8) usize { } fn fpeint(from: f64) u128 { - const bits = @bitCast(u64, from); + const bits = @as(u64, @bitCast(from)); assert((bits & ((1 << 52) - 1)) == 0); - return @as(u128, 1) << @truncate(u7, (bits >> 52) -% 1023); + return @as(u128, 1) << @as(u7, @truncate((bits >> 52) -% 1023)); } /// Given two different integers with the same length in terms of the number diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig index b14fe5ca3c73..98fbe28032aa 100644 --- a/lib/std/fmt/parse_float.zig +++ b/lib/std/fmt/parse_float.zig @@ -78,7 +78,7 @@ test "fmt.parseFloat nan and inf" 
{ inline for ([_]type{ f16, f32, f64, f128 }) |T| { const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - try expectEqual(@bitCast(Z, try parseFloat(T, "nAn")), @bitCast(Z, std.math.nan(T))); + try expectEqual(@as(Z, @bitCast(try parseFloat(T, "nAn"))), @as(Z, @bitCast(std.math.nan(T)))); try expectEqual(try parseFloat(T, "inF"), std.math.inf(T)); try expectEqual(try parseFloat(T, "-INF"), -std.math.inf(T)); } diff --git a/lib/std/fmt/parse_float/common.zig b/lib/std/fmt/parse_float/common.zig index c1b34a081bc3..8dba3b449847 100644 --- a/lib/std/fmt/parse_float/common.zig +++ b/lib/std/fmt/parse_float/common.zig @@ -32,7 +32,7 @@ pub fn BiasedFp(comptime T: type) type { pub fn toFloat(self: Self, comptime FloatT: type, negative: bool) FloatT { var word = self.f; - word |= @intCast(MantissaT, self.e) << std.math.floatMantissaBits(FloatT); + word |= @as(MantissaT, @intCast(self.e)) << std.math.floatMantissaBits(FloatT); var f = floatFromUnsigned(FloatT, MantissaT, word); if (negative) f = -f; return f; @@ -42,10 +42,10 @@ pub fn BiasedFp(comptime T: type) type { pub fn floatFromUnsigned(comptime T: type, comptime MantissaT: type, v: MantissaT) T { return switch (T) { - f16 => @bitCast(f16, @truncate(u16, v)), - f32 => @bitCast(f32, @truncate(u32, v)), - f64 => @bitCast(f64, @truncate(u64, v)), - f128 => @bitCast(f128, v), + f16 => @as(f16, @bitCast(@as(u16, @truncate(v)))), + f32 => @as(f32, @bitCast(@as(u32, @truncate(v)))), + f64 => @as(f64, @bitCast(@as(u64, @truncate(v)))), + f128 => @as(f128, @bitCast(v)), else => unreachable, }; } diff --git a/lib/std/fmt/parse_float/convert_eisel_lemire.zig b/lib/std/fmt/parse_float/convert_eisel_lemire.zig index 5c49553a14d7..6831a308ea12 100644 --- a/lib/std/fmt/parse_float/convert_eisel_lemire.zig +++ b/lib/std/fmt/parse_float/convert_eisel_lemire.zig @@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { } // Normalize our significant digits, so the most-significant bit is 
set. - const lz = @clz(@bitCast(u64, w)); + const lz = @clz(@as(u64, @bitCast(w))); w = math.shl(u64, w, lz); const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3); @@ -62,9 +62,9 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { } } - const upper_bit = @intCast(i32, r.hi >> 63); - var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3); - var power2 = power(@intCast(i32, q)) + upper_bit - @intCast(i32, lz) - float_info.minimum_exponent; + const upper_bit = @as(i32, @intCast(r.hi >> 63)); + var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3); + var power2 = power(@as(i32, @intCast(q))) + upper_bit - @as(i32, @intCast(lz)) - float_info.minimum_exponent; if (power2 <= 0) { if (-power2 + 1 >= 64) { // Have more than 64 bits below the minimum exponent, must be 0. @@ -93,7 +93,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) { q >= float_info.min_exponent_round_to_even and q <= float_info.max_exponent_round_to_even and mantissa & 3 == 1 and - math.shl(u64, mantissa, (upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3)) == r.hi) + math.shl(u64, mantissa, (upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3)) == r.hi) { // Zero the lowest bit, so we don't round up. mantissa &= ~@as(u64, 1); @@ -139,8 +139,8 @@ const U128 = struct { pub fn mul(a: u64, b: u64) U128 { const x = @as(u128, a) * b; return .{ - .hi = @truncate(u64, x >> 64), - .lo = @truncate(u64, x), + .hi = @as(u64, @truncate(x >> 64)), + .lo = @as(u64, @truncate(x)), }; } }; @@ -161,7 +161,7 @@ fn computeProductApprox(q: i64, w: u64, comptime precision: usize) U128 { // 5^q < 2^64, then the multiplication always provides an exact value. // That means whenever we need to round ties to even, we always have // an exact value. 
- const index = @intCast(usize, q - @intCast(i64, eisel_lemire_smallest_power_of_five)); + const index = @as(usize, @intCast(q - @as(i64, @intCast(eisel_lemire_smallest_power_of_five)))); const pow5 = eisel_lemire_table_powers_of_five_128[index]; // Only need one multiplication as long as there is 1 zero but diff --git a/lib/std/fmt/parse_float/convert_fast.zig b/lib/std/fmt/parse_float/convert_fast.zig index 2124e436ab42..a148d3946f63 100644 --- a/lib/std/fmt/parse_float/convert_fast.zig +++ b/lib/std/fmt/parse_float/convert_fast.zig @@ -108,19 +108,19 @@ pub fn convertFast(comptime T: type, n: Number(T)) ?T { var value: T = 0; if (n.exponent <= info.max_exponent_fast_path) { // normal fast path - value = @floatFromInt(T, n.mantissa); + value = @as(T, @floatFromInt(n.mantissa)); value = if (n.exponent < 0) - value / fastPow10(T, @intCast(usize, -n.exponent)) + value / fastPow10(T, @as(usize, @intCast(-n.exponent))) else - value * fastPow10(T, @intCast(usize, n.exponent)); + value * fastPow10(T, @as(usize, @intCast(n.exponent))); } else { // disguised fast path const shift = n.exponent - info.max_exponent_fast_path; - const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @intCast(usize, shift))) catch return null; + const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @as(usize, @intCast(shift)))) catch return null; if (mantissa > info.max_mantissa_fast_path) { return null; } - value = @floatFromInt(T, mantissa) * fastPow10(T, info.max_exponent_fast_path); + value = @as(T, @floatFromInt(mantissa)) * fastPow10(T, info.max_exponent_fast_path); } if (n.negative) { diff --git a/lib/std/fmt/parse_float/convert_hex.zig b/lib/std/fmt/parse_float/convert_hex.zig index 3b3f797216a7..815331347c0f 100644 --- a/lib/std/fmt/parse_float/convert_hex.zig +++ b/lib/std/fmt/parse_float/convert_hex.zig @@ -81,7 +81,7 @@ pub fn convertHex(comptime T: type, n_: Number(T)) T { } var bits = n.mantissa & ((1 << mantissa_bits) - 1); - bits |= 
@intCast(MantissaT, (n.exponent - exp_bias) & ((1 << exp_bits) - 1)) << mantissa_bits; + bits |= @as(MantissaT, @intCast((n.exponent - exp_bias) & ((1 << exp_bits) - 1))) << mantissa_bits; if (n.negative) { bits |= 1 << (mantissa_bits + exp_bits); } diff --git a/lib/std/fmt/parse_float/convert_slow.zig b/lib/std/fmt/parse_float/convert_slow.zig index 225a1e208c2f..53cb12ef1370 100644 --- a/lib/std/fmt/parse_float/convert_slow.zig +++ b/lib/std/fmt/parse_float/convert_slow.zig @@ -48,13 +48,13 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { var exp2: i32 = 0; // Shift right toward (1/2 .. 1] while (d.decimal_point > 0) { - const n = @intCast(usize, d.decimal_point); + const n = @as(usize, @intCast(d.decimal_point)); const shift = getShift(n); d.rightShift(shift); if (d.decimal_point < -Decimal(T).decimal_point_range) { return BiasedFp(T).zero(); } - exp2 += @intCast(i32, shift); + exp2 += @as(i32, @intCast(shift)); } // Shift left toward (1/2 .. 1] while (d.decimal_point <= 0) { @@ -66,7 +66,7 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { else => 1, }; } else { - const n = @intCast(usize, -d.decimal_point); + const n = @as(usize, @intCast(-d.decimal_point)); break :blk getShift(n); } }; @@ -74,17 +74,17 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) { if (d.decimal_point > Decimal(T).decimal_point_range) { return BiasedFp(T).inf(T); } - exp2 -= @intCast(i32, shift); + exp2 -= @as(i32, @intCast(shift)); } // We are now in the range [1/2 .. 1] but the binary format uses [1 .. 
2] exp2 -= 1; while (min_exponent + 1 > exp2) { - var n = @intCast(usize, (min_exponent + 1) - exp2); + var n = @as(usize, @intCast((min_exponent + 1) - exp2)); if (n > max_shift) { n = max_shift; } d.rightShift(n); - exp2 += @intCast(i32, n); + exp2 += @as(i32, @intCast(n)); } if (exp2 - min_exponent >= infinite_power) { return BiasedFp(T).inf(T); diff --git a/lib/std/fmt/parse_float/decimal.zig b/lib/std/fmt/parse_float/decimal.zig index 5bb5fa8d5e32..f8d736a065a7 100644 --- a/lib/std/fmt/parse_float/decimal.zig +++ b/lib/std/fmt/parse_float/decimal.zig @@ -114,7 +114,7 @@ pub fn Decimal(comptime T: type) type { return math.maxInt(MantissaT); } - const dp = @intCast(usize, self.decimal_point); + const dp = @as(usize, @intCast(self.decimal_point)); var n: MantissaT = 0; var i: usize = 0; @@ -155,7 +155,7 @@ pub fn Decimal(comptime T: type) type { const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { - self.digits[write_index] = @intCast(u8, remainder); + self.digits[write_index] = @as(u8, @intCast(remainder)); } else if (remainder > 0) { self.truncated = true; } @@ -167,7 +167,7 @@ pub fn Decimal(comptime T: type) type { const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { - self.digits[write_index] = @intCast(u8, remainder); + self.digits[write_index] = @as(u8, @intCast(remainder)); } else if (remainder > 0) { self.truncated = true; } @@ -178,7 +178,7 @@ pub fn Decimal(comptime T: type) type { if (self.num_digits > max_digits) { self.num_digits = max_digits; } - self.decimal_point += @intCast(i32, num_new_digits); + self.decimal_point += @as(i32, @intCast(num_new_digits)); self.trim(); } @@ -202,7 +202,7 @@ pub fn Decimal(comptime T: type) type { } } - self.decimal_point -= @intCast(i32, read_index) - 1; + self.decimal_point -= @as(i32, @intCast(read_index)) - 1; if (self.decimal_point < -decimal_point_range) { self.num_digits = 0; self.decimal_point = 0; @@ -212,14 +212,14 @@ 
pub fn Decimal(comptime T: type) type { const mask = math.shl(MantissaT, 1, shift) - 1; while (read_index < self.num_digits) { - const new_digit = @intCast(u8, math.shr(MantissaT, n, shift)); + const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift))); n = (10 * (n & mask)) + self.digits[read_index]; read_index += 1; self.digits[write_index] = new_digit; write_index += 1; } while (n > 0) { - const new_digit = @intCast(u8, math.shr(MantissaT, n, shift)); + const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift))); n = 10 * (n & mask); if (write_index < max_digits) { self.digits[write_index] = new_digit; @@ -268,7 +268,7 @@ pub fn Decimal(comptime T: type) type { while (stream.scanDigit(10)) |digit| { d.tryAddDigit(digit); } - d.decimal_point = @intCast(i32, marker) - @intCast(i32, stream.offsetTrue()); + d.decimal_point = @as(i32, @intCast(marker)) - @as(i32, @intCast(stream.offsetTrue())); } if (d.num_digits != 0) { // Ignore trailing zeros if any @@ -284,9 +284,9 @@ pub fn Decimal(comptime T: type) type { i -= 1; if (i == 0) break; } - d.decimal_point += @intCast(i32, n_trailing_zeros); + d.decimal_point += @as(i32, @intCast(n_trailing_zeros)); d.num_digits -= n_trailing_zeros; - d.decimal_point += @intCast(i32, d.num_digits); + d.decimal_point += @as(i32, @intCast(d.num_digits)); if (d.num_digits > max_digits) { d.truncated = true; d.num_digits = max_digits; diff --git a/lib/std/fmt/parse_float/parse.zig b/lib/std/fmt/parse_float/parse.zig index 9f6e75b29a59..a31df31312eb 100644 --- a/lib/std/fmt/parse_float/parse.zig +++ b/lib/std/fmt/parse_float/parse.zig @@ -21,7 +21,7 @@ fn parse8Digits(v_: u64) u64 { v = (v * 10) + (v >> 8); // will not overflow, fits in 63 bits const v1 = (v & mask) *% mul1; const v2 = ((v >> 16) & mask) *% mul2; - return @as(u64, @truncate(u32, (v1 +% v2) >> 32)); + return @as(u64, @as(u32, @truncate((v1 +% v2) >> 32))); } /// Parse digits until a non-digit character is found. 
@@ -106,7 +106,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool var mantissa: MantissaT = 0; tryParseDigits(MantissaT, stream, &mantissa, info.base); var int_end = stream.offsetTrue(); - var n_digits = @intCast(isize, stream.offsetTrue()); + var n_digits = @as(isize, @intCast(stream.offsetTrue())); // the base being 16 implies a 0x prefix, which shouldn't be included in the digit count if (info.base == 16) n_digits -= 2; @@ -117,8 +117,8 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool const marker = stream.offsetTrue(); tryParseDigits(MantissaT, stream, &mantissa, info.base); const n_after_dot = stream.offsetTrue() - marker; - exponent = -@intCast(i64, n_after_dot); - n_digits += @intCast(isize, n_after_dot); + exponent = -@as(i64, @intCast(n_after_dot)); + n_digits += @as(isize, @intCast(n_after_dot)); } // adjust required shift to offset mantissa for base-16 (2^4) @@ -163,7 +163,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool // '0' = '.' + 2 const next = stream.firstUnchecked(); if (next != '_') { - n_digits -= @intCast(isize, next -| ('0' - 1)); + n_digits -= @as(isize, @intCast(next -| ('0' - 1))); } else { stream.underscore_count += 1; } @@ -179,7 +179,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool exponent = blk: { if (mantissa >= min_n_digit_int(MantissaT, info.max_mantissa_digits)) { // big int - break :blk @intCast(i64, int_end) - @intCast(i64, stream.offsetTrue()); + break :blk @as(i64, @intCast(int_end)) - @as(i64, @intCast(stream.offsetTrue())); } else { // the next byte must be present and be '.' 
// We know this is true because we had more than 19 @@ -190,7 +190,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool stream.advance(1); var marker = stream.offsetTrue(); tryParseNDigits(MantissaT, stream, &mantissa, info.base, info.max_mantissa_digits); - break :blk @intCast(i64, marker) - @intCast(i64, stream.offsetTrue()); + break :blk @as(i64, @intCast(marker)) - @as(i64, @intCast(stream.offsetTrue())); } }; // add back the explicit part diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 8e828fd33498..cb6ce2032ec7 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -373,13 +373,13 @@ pub const IterableDir = struct { } } self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const darwin_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + darwin_entry.reclen(); self.index = next_index; - const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen]; + const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen]; if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.d_ino == 0)) { continue :start_over; @@ -421,13 +421,13 @@ pub const IterableDir = struct { } if (rc == 0) return null; self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.d_name)), 0); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) continue :start_over; @@ -485,13 +485,13 @@ pub const IterableDir = struct { } if (rc == 0) 
return null; self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const bsd_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const bsd_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + bsd_entry.reclen(); self.index = next_index; - const name = @ptrCast([*]u8, &bsd_entry.d_name)[0..bsd_entry.d_namlen]; + const name = @as([*]u8, @ptrCast(&bsd_entry.d_name))[0..bsd_entry.d_namlen]; const skip_zero_fileno = switch (builtin.os.tag) { // d_fileno=0 is used to mark invalid entries or deleted files. @@ -567,12 +567,12 @@ pub const IterableDir = struct { } } self.index = 0; - self.end_index = @intCast(usize, rc); + self.end_index = @as(usize, @intCast(rc)); } - const haiku_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const haiku_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + haiku_entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &haiku_entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&haiku_entry.d_name)), 0); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (haiku_entry.d_ino == 0)) { continue :start_over; @@ -672,11 +672,11 @@ pub const IterableDir = struct { self.index = 0; self.end_index = rc; } - const linux_entry = @ptrCast(*align(1) linux.dirent64, &self.buf[self.index]); + const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index])); const next_index = self.index + linux_entry.reclen(); self.index = next_index; - const name = mem.sliceTo(@ptrCast([*:0]u8, &linux_entry.d_name), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.d_name)), 0); // skip . and .. 
entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { @@ -750,15 +750,14 @@ pub const IterableDir = struct { } } - const aligned_ptr = @alignCast(@alignOf(w.FILE_BOTH_DIR_INFORMATION), &self.buf[self.index]); - const dir_info = @ptrCast(*w.FILE_BOTH_DIR_INFORMATION, aligned_ptr); + const dir_info: *w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index])); if (dir_info.NextEntryOffset != 0) { self.index += dir_info.NextEntryOffset; } else { self.index = self.buf.len; } - const name_utf16le = @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2]; + const name_utf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2]; if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' })) continue; @@ -835,7 +834,7 @@ pub const IterableDir = struct { self.index = 0; self.end_index = bufused; } - const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]); + const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index])); const entry_size = @sizeOf(w.dirent_t); const name_index = self.index + entry_size; if (name_index + entry.d_namlen > self.end_index) { @@ -1789,7 +1788,7 @@ pub const Dir = struct { .fd = undefined, }; - const path_len_bytes = @intCast(u16, mem.sliceTo(sub_path_w, 0).len * 2); + const path_len_bytes = @as(u16, @intCast(mem.sliceTo(sub_path_w, 0).len * 2)); var nt_name = w.UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index 0c6e8a24f7cb..e9448aa5d317 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -368,7 +368,7 @@ pub const File = struct { return Stat{ .inode = st.ino, - .size = @bitCast(u64, st.size), + .size = @as(u64, @bitCast(st.size)), .mode = st.mode, .kind = kind, .atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec, @@ -398,7 +398,7 @@ pub const File = struct { } return Stat{ .inode = 
info.InternalInformation.IndexNumber, - .size = @bitCast(u64, info.StandardInformation.EndOfFile), + .size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)), .mode = 0, .kind = if (info.StandardInformation.Directory == 0) .file else .directory, .atime = windows.fromSysTime(info.BasicInformation.LastAccessTime), @@ -650,7 +650,7 @@ pub const File = struct { /// Returns the size of the file pub fn size(self: Self) u64 { - return @intCast(u64, self.stat.size); + return @as(u64, @intCast(self.stat.size)); } /// Returns a `Permissions` struct, representing the permissions on the file @@ -855,7 +855,7 @@ pub const File = struct { if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) { var reparse_buf: [windows.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined; try windows.DeviceIoControl(self.handle, windows.FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]); - const reparse_struct = @ptrCast(*const windows.REPARSE_DATA_BUFFER, @alignCast(@alignOf(windows.REPARSE_DATA_BUFFER), &reparse_buf[0])); + const reparse_struct: *const windows.REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0])); break :reparse_blk reparse_struct.ReparseTag; } break :reparse_blk 0; @@ -864,7 +864,7 @@ pub const File = struct { break :blk MetadataWindows{ .attributes = info.BasicInformation.FileAttributes, .reparse_tag = reparse_tag, - ._size = @bitCast(u64, info.StandardInformation.EndOfFile), + ._size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)), .access_time = windows.fromSysTime(info.BasicInformation.LastAccessTime), .modified_time = windows.fromSysTime(info.BasicInformation.LastWriteTime), .creation_time = windows.fromSysTime(info.BasicInformation.CreationTime), @@ -881,16 +881,16 @@ pub const File = struct { .NOSYS => { const st = try os.fstat(self.handle); - stx.mode = @intCast(u16, st.mode); + stx.mode = @as(u16, @intCast(st.mode)); // Hacky conversion from timespec to statx_timestamp stx.atime = std.mem.zeroes(os.linux.statx_timestamp); 
stx.atime.tv_sec = st.atim.tv_sec; - stx.atime.tv_nsec = @intCast(u32, st.atim.tv_nsec); // Guaranteed to succeed (tv_nsec is always below 10^9) + stx.atime.tv_nsec = @as(u32, @intCast(st.atim.tv_nsec)); // Guaranteed to succeed (tv_nsec is always below 10^9) stx.mtime = std.mem.zeroes(os.linux.statx_timestamp); stx.mtime.tv_sec = st.mtim.tv_sec; - stx.mtime.tv_nsec = @intCast(u32, st.mtim.tv_nsec); + stx.mtime.tv_nsec = @as(u32, @intCast(st.mtim.tv_nsec)); stx.mask = os.linux.STATX_BASIC_STATS | os.linux.STATX_MTIME; }, @@ -1414,7 +1414,7 @@ pub const File = struct { amt = try os.sendfile(out_fd, in_fd, offset + off, count - off, zero_iovec, trailers, flags); off += amt; } - amt = @intCast(usize, off - count); + amt = @as(usize, @intCast(off - count)); } var i: usize = 0; while (i < trailers.len) { diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig index 4f7ba9af6238..2f599c32130e 100644 --- a/lib/std/fs/get_app_data_dir.zig +++ b/lib/std/fs/get_app_data_dir.zig @@ -23,7 +23,7 @@ pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDi &dir_path_ptr, )) { os.windows.S_OK => { - defer os.windows.ole32.CoTaskMemFree(@ptrCast(*anyopaque, dir_path_ptr)); + defer os.windows.ole32.CoTaskMemFree(@as(*anyopaque, @ptrCast(dir_path_ptr))); const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.sliceTo(dir_path_ptr, 0)) catch |err| switch (err) { error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 75c9b1df78bf..7ed7a75ea9f7 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -17,7 +17,7 @@ pub const Preopens = struct { pub fn find(p: Preopens, name: []const u8) ?os.fd_t { for (p.names, 0..) 
|elem_name, i| { if (mem.eql(u8, elem_name, name)) { - return @intCast(os.fd_t, i); + return @as(os.fd_t, @intCast(i)); } } return null; @@ -34,7 +34,7 @@ pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens { names.appendAssumeCapacity("stdout"); // 1 names.appendAssumeCapacity("stderr"); // 2 while (true) { - const fd = @intCast(wasi.fd_t, names.items.len); + const fd = @as(wasi.fd_t, @intCast(names.items.len)); var prestat: prestat_t = undefined; switch (wasi.fd_prestat_get(fd, &prestat)) { .SUCCESS => {}, diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig index 0deaa86468c0..280c8888e6ea 100644 --- a/lib/std/fs/watch.zig +++ b/lib/std/fs/watch.zig @@ -279,7 +279,7 @@ pub fn Watch(comptime V: type) type { while (!put.cancelled) { kev.* = os.Kevent{ - .ident = @intCast(usize, fd), + .ident = @as(usize, @intCast(fd)), .filter = os.EVFILT_VNODE, .flags = os.EV_ADD | os.EV_ENABLE | os.EV_CLEAR | os.EV_ONESHOT | os.NOTE_WRITE | os.NOTE_DELETE | os.NOTE_REVOKE, @@ -487,14 +487,14 @@ pub fn Watch(comptime V: type) type { var ptr: [*]u8 = &event_buf; const end_ptr = ptr + bytes_transferred; while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) { - const ev = @ptrCast(*const windows.FILE_NOTIFY_INFORMATION, ptr); + const ev = @as(*const windows.FILE_NOTIFY_INFORMATION, @ptrCast(ptr)); const emit = switch (ev.Action) { windows.FILE_ACTION_REMOVED => WatchEventId.Delete, windows.FILE_ACTION_MODIFIED => .CloseWrite, else => null, }; if (emit) |id| { - const basename_ptr = @ptrCast([*]u16, ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION)); + const basename_ptr = @as([*]u16, @ptrCast(ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION))); const basename_utf16le = basename_ptr[0 .. ev.FileNameLength / 2]; var basename_data: [std.fs.MAX_PATH_BYTES]u8 = undefined; const basename = basename_data[0 .. 
std.unicode.utf16leToUtf8(&basename_data, basename_utf16le) catch unreachable]; @@ -510,7 +510,7 @@ pub fn Watch(comptime V: type) type { } if (ev.NextEntryOffset == 0) break; - ptr = @alignCast(@alignOf(windows.FILE_NOTIFY_INFORMATION), ptr + ev.NextEntryOffset); + ptr = @alignCast(ptr + ev.NextEntryOffset); } } } @@ -586,10 +586,10 @@ pub fn Watch(comptime V: type) type { var ptr: [*]u8 = &event_buf; const end_ptr = ptr + bytes_read; while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) { - const ev = @ptrCast(*const os.linux.inotify_event, ptr); + const ev = @as(*const os.linux.inotify_event, @ptrCast(ptr)); if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) { const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); - const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr)); + const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr))); const dir = &self.os_data.wd_table.get(ev.wd).?; if (dir.file_table.getEntry(basename)) |file_value| { @@ -615,7 +615,7 @@ pub fn Watch(comptime V: type) type { } else if (ev.mask & os.linux.IN_DELETE == os.linux.IN_DELETE) { // File or directory was removed or deleted const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); - const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr)); + const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr))); const dir = &self.os_data.wd_table.get(ev.wd).?; if (dir.file_table.getEntry(basename)) |file_value| { @@ -628,7 +628,7 @@ pub fn Watch(comptime V: type) type { } } - ptr = @alignCast(@alignOf(os.linux.inotify_event), ptr + @sizeOf(os.linux.inotify_event) + ev.len); + ptr = @alignCast(ptr + @sizeOf(os.linux.inotify_event) + ev.len); } } } diff --git a/lib/std/hash/adler.zig b/lib/std/hash/adler.zig index 78f52b539b21..200dc9aafec6 100644 --- a/lib/std/hash/adler.zig +++ b/lib/std/hash/adler.zig @@ -118,7 +118,7 @@ test "adler32 very long with variation" { var i: usize = 0; while (i < result.len) : (i += 1) { - result[i] = @truncate(u8, i); + 
result[i] = @as(u8, @truncate(i)); } break :blk result; diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index f33bd635fc5c..c5c6c585ebad 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -92,10 +92,10 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { // Help the optimizer see that hashing an int is easy by inlining! // TODO Check if the situation is better after #561 is resolved. .Int => |int| switch (int.signedness) { - .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{ + .signed => hash(hasher, @as(@Type(.{ .Int = .{ .bits = int.bits, .signedness = .unsigned, - } }), key), strat), + } }), @bitCast(key)), strat), .unsigned => { if (comptime meta.trait.hasUniqueRepresentation(Key)) { @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 62df89f0ae23..699de5ceb4f5 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -122,13 +122,13 @@ pub fn benchmarkHash(comptime H: anytype, bytes: usize, allocator: std.mem.Alloc for (0..blocks_count) |i| { h.update(blocks[i * alignment ..][0..block_size]); } - const final = if (H.has_crypto_api) @truncate(u64, h.finalInt()) else h.final(); + const final = if (H.has_crypto_api) @as(u64, @truncate(h.finalInt())) else h.final(); std.mem.doNotOptimizeAway(final); const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); return Result{ .hash = final, @@ -152,7 +152,7 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize const final = blk: { if (H.init_u8s) |init| { if (H.has_crypto_api) { - break :blk @truncate(u64, 
H.ty.toInt(small_key, init[0..H.ty.key_length])); + break :blk @as(u64, @truncate(H.ty.toInt(small_key, init[0..H.ty.key_length]))); } else { break :blk H.ty.hash(init, small_key); } @@ -166,8 +166,8 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); std.mem.doNotOptimizeAway(sum); diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig index d0884b135f57..8040c99b8425 100644 --- a/lib/std/hash/cityhash.zig +++ b/lib/std/hash/cityhash.zig @@ -2,7 +2,7 @@ const std = @import("std"); inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 { // ptr + offset doesn't work at comptime so we need this instead. 
- return @ptrCast([*]const u8, &ptr[offset]); + return @as([*]const u8, @ptrCast(&ptr[offset])); } fn fetch32(ptr: [*]const u8, offset: usize) u32 { @@ -49,18 +49,18 @@ pub const CityHash32 = struct { } fn hash32Len0To4(str: []const u8) u32 { - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); var b: u32 = 0; var c: u32 = 9; for (str) |v| { - b = b *% c1 +% @bitCast(u32, @intCast(i32, @bitCast(i8, v))); + b = b *% c1 +% @as(u32, @bitCast(@as(i32, @intCast(@as(i8, @bitCast(v)))))); c ^= b; } return fmix(mur(b, mur(len, c))); } fn hash32Len5To12(str: []const u8) u32 { - var a: u32 = @truncate(u32, str.len); + var a: u32 = @as(u32, @truncate(str.len)); var b: u32 = a *% 5; var c: u32 = 9; const d: u32 = b; @@ -73,7 +73,7 @@ pub const CityHash32 = struct { } fn hash32Len13To24(str: []const u8) u32 { - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); const a: u32 = fetch32(str.ptr, (str.len >> 1) - 4); const b: u32 = fetch32(str.ptr, 4); const c: u32 = fetch32(str.ptr, str.len - 8); @@ -95,7 +95,7 @@ pub const CityHash32 = struct { } } - const len: u32 = @truncate(u32, str.len); + const len: u32 = @as(u32, @truncate(str.len)); var h: u32 = len; var g: u32 = c1 *% len; var f: u32 = g; @@ -220,9 +220,9 @@ pub const CityHash64 = struct { const a: u8 = str[0]; const b: u8 = str[str.len >> 1]; const c: u8 = str[str.len - 1]; - const y: u32 = @intCast(u32, a) +% (@intCast(u32, b) << 8); - const z: u32 = @truncate(u32, str.len) +% (@intCast(u32, c) << 2); - return shiftmix(@intCast(u64, y) *% k2 ^ @intCast(u64, z) *% k0) *% k2; + const y: u32 = @as(u32, @intCast(a)) +% (@as(u32, @intCast(b)) << 8); + const z: u32 = @as(u32, @truncate(str.len)) +% (@as(u32, @intCast(c)) << 2); + return shiftmix(@as(u64, @intCast(y)) *% k2 ^ @as(u64, @intCast(z)) *% k0) *% k2; } return k2; } @@ -309,7 +309,7 @@ pub const CityHash64 = struct { var w: WeakPair = weakHashLen32WithSeeds(offsetPtr(str.ptr, str.len - 
32), y +% k1, x); x = x *% k1 +% fetch64(str.ptr, 0); - len = (len - 1) & ~@intCast(u64, 63); + len = (len - 1) & ~@as(u64, @intCast(63)); var ptr: [*]const u8 = str.ptr; while (true) { @@ -353,19 +353,19 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 { var i: u32 = 0; while (i < 256) : (i += 1) { - key[i] = @intCast(u8, i); + key[i] = @as(u8, @intCast(i)); var h: HashResult = hash_fn(key[0..i], 256 - i); // comptime can't really do reinterpret casting yet, // so we need to write the bytes manually. for (hashes_bytes[i * @sizeOf(HashResult) ..][0..@sizeOf(HashResult)]) |*byte| { - byte.* = @truncate(u8, h); + byte.* = @as(u8, @truncate(h)); h = h >> 8; } } - return @truncate(u32, hash_fn(&hashes_bytes, 0)); + return @as(u32, @truncate(hash_fn(&hashes_bytes, 0))); } fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 { diff --git a/lib/std/hash/crc.zig b/lib/std/hash/crc.zig index da250af1bf8d..3e1e458ffc36 100644 --- a/lib/std/hash/crc.zig +++ b/lib/std/hash/crc.zig @@ -65,7 +65,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type { } inline fn tableEntry(index: I) I { - return lookup_table[@intCast(u8, index & 0xFF)]; + return lookup_table[@as(u8, @intCast(index & 0xFF))]; } pub fn update(self: *Self, bytes: []const u8) void { @@ -95,7 +95,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type { if (!algorithm.reflect_output) { c >>= @bitSizeOf(I) - @bitSizeOf(W); } - return @intCast(W, c ^ algorithm.xor_output); + return @as(W, @intCast(c ^ algorithm.xor_output)); } pub fn hash(bytes: []const u8) W { @@ -125,7 +125,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { var tables: [8][256]u32 = undefined; for (&tables[0], 0..) 
|*e, i| { - var crc = @intCast(u32, i); + var crc = @as(u32, @intCast(i)); var j: usize = 0; while (j < 8) : (j += 1) { if (crc & 1 == 1) { @@ -142,7 +142,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { var crc = tables[0][i]; var j: usize = 1; while (j < 8) : (j += 1) { - const index = @truncate(u8, crc); + const index = @as(u8, @truncate(crc)); crc = tables[0][index] ^ (crc >> 8); tables[j][i] = crc; } @@ -170,14 +170,14 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type { lookup_tables[1][p[6]] ^ lookup_tables[2][p[5]] ^ lookup_tables[3][p[4]] ^ - lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - lookup_tables[7][@truncate(u8, self.crc >> 0)]; + lookup_tables[4][@as(u8, @truncate(self.crc >> 24))] ^ + lookup_tables[5][@as(u8, @truncate(self.crc >> 16))] ^ + lookup_tables[6][@as(u8, @truncate(self.crc >> 8))] ^ + lookup_tables[7][@as(u8, @truncate(self.crc >> 0))]; } while (i < input.len) : (i += 1) { - const index = @truncate(u8, self.crc) ^ input[i]; + const index = @as(u8, @truncate(self.crc)) ^ input[i]; self.crc = (self.crc >> 8) ^ lookup_tables[0][index]; } } @@ -218,7 +218,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type { var table: [16]u32 = undefined; for (&table, 0..) 
|*e, i| { - var crc = @intCast(u32, i * 16); + var crc = @as(u32, @intCast(i * 16)); var j: usize = 0; while (j < 8) : (j += 1) { if (crc & 1 == 1) { @@ -241,8 +241,8 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type { pub fn update(self: *Self, input: []const u8) void { for (input) |b| { - self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4); - self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4); + self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 0)))] ^ (self.crc >> 4); + self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 4)))] ^ (self.crc >> 4); } } diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig index 753439a4cf92..bd433874edf4 100644 --- a/lib/std/hash/murmur.zig +++ b/lib/std/hash/murmur.zig @@ -14,9 +14,9 @@ pub const Murmur2_32 = struct { pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const m: u32 = 0x5bd1e995; - const len = @truncate(u32, str.len); + const len = @as(u32, @truncate(str.len)); var h1: u32 = seed ^ len; - for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| { var k1: u32 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -29,13 +29,13 @@ pub const Murmur2_32 = struct { const offset = len & 0xfffffffc; const rest = len & 3; if (rest >= 3) { - h1 ^= @intCast(u32, str[offset + 2]) << 16; + h1 ^= @as(u32, @intCast(str[offset + 2])) << 16; } if (rest >= 2) { - h1 ^= @intCast(u32, str[offset + 1]) << 8; + h1 ^= @as(u32, @intCast(str[offset + 1])) << 8; } if (rest >= 1) { - h1 ^= @intCast(u32, str[offset + 0]); + h1 ^= @as(u32, @intCast(str[offset + 0])); h1 *%= m; } h1 ^= h1 >> 13; @@ -73,12 +73,12 @@ pub const Murmur2_32 = struct { const len: u32 = 8; var h1: u32 = seed ^ len; var k1: u32 = undefined; - k1 = @truncate(u32, v) *% m; + k1 = @as(u32, @truncate(v)) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; h1 ^= k1; - k1 = @truncate(u32, v >> 32) *% m; + k1 = 
@as(u32, @truncate(v >> 32)) *% m; k1 ^= k1 >> 24; k1 *%= m; h1 *%= m; @@ -100,7 +100,7 @@ pub const Murmur2_64 = struct { pub fn hashWithSeed(str: []const u8, seed: u64) u64 { const m: u64 = 0xc6a4a7935bd1e995; var h1: u64 = seed ^ (@as(u64, str.len) *% m); - for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| { + for (@as([*]align(1) const u64, @ptrCast(str.ptr))[0 .. str.len / 8]) |v| { var k1: u64 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -114,7 +114,7 @@ pub const Murmur2_64 = struct { const offset = str.len - rest; if (rest > 0) { var k1: u64 = 0; - @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]); + @memcpy(@as([*]u8, @ptrCast(&k1))[0..rest], str[offset..]); if (native_endian == .Big) k1 = @byteSwap(k1); h1 ^= k1; @@ -178,9 +178,9 @@ pub const Murmur3_32 = struct { pub fn hashWithSeed(str: []const u8, seed: u32) u32 { const c1: u32 = 0xcc9e2d51; const c2: u32 = 0x1b873593; - const len = @truncate(u32, str.len); + const len = @as(u32, @truncate(str.len)); var h1: u32 = seed; - for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| { var k1: u32 = v; if (native_endian == .Big) k1 = @byteSwap(k1); @@ -197,13 +197,13 @@ pub const Murmur3_32 = struct { const offset = len & 0xfffffffc; const rest = len & 3; if (rest == 3) { - k1 ^= @intCast(u32, str[offset + 2]) << 16; + k1 ^= @as(u32, @intCast(str[offset + 2])) << 16; } if (rest >= 2) { - k1 ^= @intCast(u32, str[offset + 1]) << 8; + k1 ^= @as(u32, @intCast(str[offset + 1])) << 8; } if (rest >= 1) { - k1 ^= @intCast(u32, str[offset + 0]); + k1 ^= @as(u32, @intCast(str[offset + 0])); k1 *%= c1; k1 = rotl32(k1, 15); k1 *%= c2; @@ -255,14 +255,14 @@ pub const Murmur3_32 = struct { const len: u32 = 8; var h1: u32 = seed; var k1: u32 = undefined; - k1 = @truncate(u32, v) *% c1; + k1 = @as(u32, @truncate(v)) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 *%= 5; h1 +%= 
0xe6546b64; - k1 = @truncate(u32, v >> 32) *% c1; + k1 = @as(u32, @truncate(v >> 32)) *% c1; k1 = rotl32(k1, 15); k1 *%= c2; h1 ^= k1; @@ -286,15 +286,15 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { var i: u32 = 0; while (i < 256) : (i += 1) { - key[i] = @truncate(u8, i); + key[i] = @as(u8, @truncate(i)); var h = hash_fn(key[0..i], 256 - i); if (native_endian == .Big) h = @byteSwap(h); - @memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h)); + @memcpy(hashes[i * hashbytes ..][0..hashbytes], @as([*]u8, @ptrCast(&h))); } - return @truncate(u32, hash_fn(&hashes, 0)); + return @as(u32, @truncate(hash_fn(&hashes, 0))); } test "murmur2_32" { @@ -307,8 +307,8 @@ test "murmur2_32" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0)); - try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1)); + try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_32.hashUint32(v0)); + try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_32.hashUint64(v1)); } test "murmur2_64" { @@ -321,8 +321,8 @@ test "murmur2_64" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0)); - try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1)); + try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_64.hashUint32(v0)); + try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_64.hashUint64(v1)); } test "murmur3_32" { @@ -335,6 +335,6 @@ test "murmur3_32" { v0le = @byteSwap(v0le); v1le = @byteSwap(v1le); } - try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0)); - try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, 
&v1le)[0..8]), Murmur3_32.hashUint64(v1)); + try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur3_32.hashUint32(v0)); + try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur3_32.hashUint64(v1)); } diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3573745444e9..aced3be66ec8 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -132,8 +132,8 @@ pub const Wyhash = struct { inline fn mum(a: *u64, b: *u64) void { const x = @as(u128, a.*) *% b.*; - a.* = @truncate(u64, x); - b.* = @truncate(u64, x >> 64); + a.* = @as(u64, @truncate(x)); + b.* = @as(u64, @truncate(x >> 64)); } inline fn mix(a_: u64, b_: u64) u64 { @@ -252,7 +252,7 @@ test "test ensure idempotent final call" { test "iterative non-divisible update" { var buf: [8192]u8 = undefined; for (&buf, 0..) |*e, i| { - e.* = @truncate(u8, i); + e.* = @as(u8, @truncate(i)); } const seed = 0x128dad08f; diff --git a/lib/std/hash/xxhash.zig b/lib/std/hash/xxhash.zig index 3122406488db..f1d1da429d58 100644 --- a/lib/std/hash/xxhash.zig +++ b/lib/std/hash/xxhash.zig @@ -212,7 +212,7 @@ pub const XxHash32 = struct { rotl(u32, self.acc3, 12) +% rotl(u32, self.acc4, 18); } - acc = acc +% @intCast(u32, self.byte_count) +% @intCast(u32, self.buf_len); + acc = acc +% @as(u32, @intCast(self.byte_count)) +% @as(u32, @intCast(self.buf_len)); var pos: usize = 0; while (pos + 4 <= self.buf_len) : (pos += 4) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 4f1639cd60de..0afe6f9643a3 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -101,7 +101,7 @@ pub const StringIndexContext = struct { } pub fn hash(self: @This(), x: u32) u64 { - const x_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + x, 0); + const x_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x, 0); return hashString(x_slice); } }; @@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct { bytes: 
*std.ArrayListUnmanaged(u8), pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool { - const b_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + b, 0); + const b_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b, 0); return mem.eql(u8, a_slice, b_slice); } @@ -777,25 +777,25 @@ pub fn HashMapUnmanaged( fingerprint: FingerPrint = free, used: u1 = 0, - const slot_free = @bitCast(u8, Metadata{ .fingerprint = free }); - const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone }); + const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free })); + const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone })); pub fn isUsed(self: Metadata) bool { return self.used == 1; } pub fn isTombstone(self: Metadata) bool { - return @bitCast(u8, self) == slot_tombstone; + return @as(u8, @bitCast(self)) == slot_tombstone; } pub fn isFree(self: Metadata) bool { - return @bitCast(u8, self) == slot_free; + return @as(u8, @bitCast(self)) == slot_free; } pub fn takeFingerprint(hash: Hash) FingerPrint { const hash_bits = @typeInfo(Hash).Int.bits; const fp_bits = @typeInfo(FingerPrint).Int.bits; - return @truncate(FingerPrint, hash >> (hash_bits - fp_bits)); + return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits))); } pub fn fill(self: *Metadata, fp: FingerPrint) void { @@ -899,7 +899,7 @@ pub fn HashMapUnmanaged( } fn capacityForSize(size: Size) Size { - var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1); + var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1)); new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable; return new_cap; } @@ -927,7 +927,7 @@ pub fn HashMapUnmanaged( if (self.metadata) |_| { self.initMetadatas(); self.size = 0; - self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100); + self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100)); } } @@ -942,7 +942,7 @@ pub 
fn HashMapUnmanaged( } fn header(self: *const Self) *Header { - return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1); + return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1); } fn keys(self: *const Self) [*]K { @@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged( const hash = ctx.hash(key); const mask = self.capacity() - 1; - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (metadata[0].isUsed()) { @@ -1147,7 +1147,7 @@ pub fn HashMapUnmanaged( const fingerprint = Metadata.takeFingerprint(hash); // Don't loop indefinitely when there are no empty slots. var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (!metadata[0].isFree() and limit != 0) { @@ -1325,7 +1325,7 @@ pub fn HashMapUnmanaged( const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var first_tombstone_idx: usize = self.capacity(); // invalid index var metadata = self.metadata.? + idx; @@ -1450,7 +1450,7 @@ pub fn HashMapUnmanaged( } fn initMetadatas(self: *Self) void { - @memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0); + @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. 
@sizeOf(Metadata) * self.capacity()], 0); } // This counts the number of occupied slots (not counting tombstones), which is @@ -1458,7 +1458,7 @@ pub fn HashMapUnmanaged( fn load(self: *const Self) Size { const max_load = (self.capacity() * max_load_percentage) / 100; assert(max_load >= self.available); - return @truncate(Size, max_load - self.available); + return @as(Size, @truncate(max_load - self.available)); } fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void { @@ -1480,7 +1480,7 @@ pub fn HashMapUnmanaged( const new_cap = capacityForSize(self.size); try other.allocate(allocator, new_cap); other.initMetadatas(); - other.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); var i: Size = 0; var metadata = self.metadata.?; @@ -1515,7 +1515,7 @@ pub fn HashMapUnmanaged( defer map.deinit(allocator); try map.allocate(allocator, new_cap); map.initMetadatas(); - map.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); if (self.size != 0) { const old_capacity = self.capacity(); @@ -1558,15 +1558,15 @@ pub fn HashMapUnmanaged( const metadata = ptr + @sizeOf(Header); - const hdr = @ptrFromInt(*Header, ptr); + const hdr = @as(*Header, @ptrFromInt(ptr)); if (@sizeOf([*]V) != 0) { - hdr.values = @ptrFromInt([*]V, ptr + vals_start); + hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start)); } if (@sizeOf([*]K) != 0) { - hdr.keys = @ptrFromInt([*]K, ptr + keys_start); + hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start)); } hdr.capacity = new_capacity; - self.metadata = @ptrFromInt([*]Metadata, metadata); + self.metadata = @as([*]Metadata, @ptrFromInt(metadata)); } fn deallocate(self: *Self, allocator: Allocator) void { @@ -1589,7 +1589,7 @@ pub fn HashMapUnmanaged( const total_size = std.mem.alignForward(usize, vals_end, max_align); - const 
slice = @ptrFromInt([*]align(max_align) u8, @intFromPtr(self.header()))[0..total_size]; + const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size]; allocator.free(slice); self.metadata = null; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index fd5b0754fea0..d04f9593452f 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -61,11 +61,11 @@ const CAllocator = struct { pub const supports_posix_memalign = @hasDecl(c, "posix_memalign"); fn getHeader(ptr: [*]u8) *[*]u8 { - return @ptrFromInt(*[*]u8, @intFromPtr(ptr) - @sizeOf(usize)); + return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); } fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 { - const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align); + const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); if (supports_posix_memalign) { // The posix_memalign only accepts alignment values that are a // multiple of the pointer size @@ -75,13 +75,13 @@ const CAllocator = struct { if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0) return null; - return @ptrCast([*]u8, aligned_ptr); + return @as([*]u8, @ptrCast(aligned_ptr)); } // Thin wrapper around regular malloc, overallocate to account for // alignment padding and store the original malloc()'ed pointer before // the aligned address. - var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); + var unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null)); const unaligned_addr = @intFromPtr(unaligned_ptr); const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); @@ -195,7 +195,7 @@ fn rawCAlloc( // type in C that is size 8 and has 16 byte alignment, so the alignment may // be 8 bytes rather than 16. 
Similarly if only 1 byte is requested, malloc // is allowed to return a 1-byte aligned pointer. - return @ptrCast(?[*]u8, c.malloc(len)); + return @as(?[*]u8, @ptrCast(c.malloc(len))); } fn rawCResize( @@ -283,7 +283,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { } fn getRecordPtr(buf: []u8) *align(1) usize { - return @ptrFromInt(*align(1) usize, @intFromPtr(buf.ptr) + buf.len); + return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len)); } fn alloc( @@ -293,9 +293,9 @@ pub const HeapAllocator = switch (builtin.os.tag) { return_address: usize, ) ?[*]u8 { _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const amt = n + ptr_align - 1 + @sizeOf(usize); const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst); const heap_handle = optional_heap_handle orelse blk: { @@ -308,7 +308,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @intFromPtr(ptr); const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); - const buf = @ptrFromInt([*]u8, aligned_addr)[0..n]; + const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; } @@ -322,7 +322,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { ) bool { _ = log2_buf_align; _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); const root_addr = getRecordPtr(buf).*; const align_offset = @intFromPtr(buf.ptr) - root_addr; @@ -330,10 +330,10 @@ pub const HeapAllocator = switch (builtin.os.tag) { const new_ptr = 
os.windows.kernel32.HeapReAlloc( self.heap_handle.?, os.windows.HEAP_REALLOC_IN_PLACE_ONLY, - @ptrFromInt(*anyopaque, root_addr), + @as(*anyopaque, @ptrFromInt(root_addr)), amt, ) orelse return false; - assert(new_ptr == @ptrFromInt(*anyopaque, root_addr)); + assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr))); getRecordPtr(buf.ptr[0..new_size]).* = root_addr; return true; } @@ -346,8 +346,8 @@ pub const HeapAllocator = switch (builtin.os.tag) { ) void { _ = log2_buf_align; _ = return_address; - const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx)); - os.windows.HeapFree(self.heap_handle.?, 0, @ptrFromInt(*anyopaque, getRecordPtr(buf).*)); + const self: *HeapAllocator = @ptrCast(@alignCast(ctx)); + os.windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*))); } }, else => @compileError("Unsupported OS"), @@ -415,9 +415,9 @@ pub const FixedBufferAllocator = struct { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null; const adjusted_index = self.end_index + adjust_off; const new_end_index = adjusted_index + n; @@ -433,7 +433,7 @@ pub const FixedBufferAllocator = struct { new_size: usize, return_address: usize, ) bool { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = return_address; assert(self.ownsSlice(buf)); // sanity check @@ -462,7 +462,7 @@ pub const FixedBufferAllocator = struct { 
log2_buf_align: u8, return_address: usize, ) void { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = return_address; assert(self.ownsSlice(buf)); // sanity check @@ -473,9 +473,9 @@ pub const FixedBufferAllocator = struct { } fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx)); + const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); var end_index = @atomicLoad(usize, &self.end_index, .SeqCst); while (true) { const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null; @@ -537,7 +537,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra); } @@ -549,7 +549,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra); } else { @@ -563,7 +563,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { log2_buf_align: u8, ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if 
(self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra); } else { @@ -728,14 +728,14 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void { try testing.expect(slice.len == 100); for (slice, 0..) |*item, i| { item.* = try allocator.create(i32); - item.*.* = @intCast(i32, i); + item.*.* = @as(i32, @intCast(i)); } slice = try allocator.realloc(slice, 20000); try testing.expect(slice.len == 20000); for (slice[0..100], 0..) |item, i| { - try testing.expect(item.* == @intCast(i32, i)); + try testing.expect(item.* == @as(i32, @intCast(i))); allocator.destroy(item); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 12a0bdcf3093..3e92aa5eec33 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -27,7 +27,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { w.MEM_COMMIT | w.MEM_RESERVE, w.PAGE_READWRITE, ) catch return null; - return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr)); + return @ptrCast(addr); } const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered); @@ -40,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { 0, ) catch return null; assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); - const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len); + const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len); _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic); return slice.ptr; } @@ -66,7 +66,7 @@ fn resize( // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
w.VirtualFree( - @ptrFromInt(*anyopaque, new_addr_end), + @as(*anyopaque, @ptrFromInt(new_addr_end)), old_addr_end - new_addr_end, w.MEM_DECOMMIT, ); @@ -85,9 +85,9 @@ fn resize( return true; if (new_size_aligned < buf_aligned_len) { - const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned); + const ptr = buf_unaligned.ptr + new_size_aligned; // TODO: if the next_mmap_addr_hint is within the unmapped range, update it - os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]); + os.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned])); return true; } @@ -104,7 +104,6 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); - const ptr = @alignCast(mem.page_size, slice.ptr); - os.munmap(ptr[0..buf_aligned_len]); + os.munmap(@alignCast(slice.ptr[0..buf_aligned_len])); } } diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig index fe10eb2fdb27..12bb095b30fd 100644 --- a/lib/std/heap/ThreadSafeAllocator.zig +++ b/lib/std/heap/ThreadSafeAllocator.zig @@ -15,7 +15,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -23,7 +23,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { } fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -32,7 +32,7 @@ fn resize(ctx: *anyopaque, 
buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad } fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { - const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx)); + const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig index e3e436fd2b3b..60051b688a0c 100644 --- a/lib/std/heap/WasmAllocator.zig +++ b/lib/std/heap/WasmAllocator.zig @@ -47,7 +47,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* _ = ctx; _ = return_address; // Make room for the freelist next pointer. - const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align); + const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); const actual_len = @max(len +| @sizeOf(usize), alignment); const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null; const class = math.log2(slot_size) - min_class; @@ -55,7 +55,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* const addr = a: { const top_free_ptr = frees[class]; if (top_free_ptr != 0) { - const node = @ptrFromInt(*usize, top_free_ptr + (slot_size - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize)))); frees[class] = node.*; break :a top_free_ptr; } @@ -74,11 +74,11 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* break :a next_addr; } }; - return @ptrFromInt([*]u8, addr); + return @as([*]u8, @ptrFromInt(addr)); } const bigpages_needed = bigPagesNeeded(actual_len); const addr = allocBigPages(bigpages_needed); - return @ptrFromInt([*]u8, addr); + return @as([*]u8, @ptrFromInt(addr)); } fn resize( @@ -92,7 +92,7 @@ fn resize( _ = return_address; // We don't want to move anything from one size class to another, but we // can recover bytes in between powers of 
two. - const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align); + const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align); const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align); const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len); @@ -117,20 +117,20 @@ fn free( ) void { _ = ctx; _ = return_address; - const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align); + const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); const actual_len = @max(buf.len + @sizeOf(usize), buf_align); const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len); const class = math.log2(slot_size) - min_class; const addr = @intFromPtr(buf.ptr); if (class < size_class_count) { - const node = @ptrFromInt(*usize, addr + (slot_size - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize)))); node.* = frees[class]; frees[class] = addr; } else { const bigpages_needed = bigPagesNeeded(actual_len); const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed); const big_slot_size_bytes = pow2_pages * bigpage_size; - const node = @ptrFromInt(*usize, addr + (big_slot_size_bytes - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize)))); const big_class = math.log2(pow2_pages); node.* = big_frees[big_class]; big_frees[big_class] = addr; @@ -148,14 +148,14 @@ fn allocBigPages(n: usize) usize { const top_free_ptr = big_frees[class]; if (top_free_ptr != 0) { - const node = @ptrFromInt(*usize, top_free_ptr + (slot_size_bytes - @sizeOf(usize))); + const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize)))); big_frees[class] = node.*; return top_free_ptr; } const page_index = @wasmMemoryGrow(0, pow2_pages * pages_per_bigpage); if (page_index <= 0) return 0; - const addr = 
@intCast(u32, page_index) * wasm.page_size; + const addr = @as(u32, @intCast(page_index)) * wasm.page_size; return addr; } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index c77164ee2de2..8f484c52f621 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -40,7 +40,7 @@ const FreeBlock = struct { fn getBit(self: FreeBlock, idx: usize) PageStatus { const bit_offset = 0; - return @enumFromInt(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset)); + return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset))); } fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void { @@ -63,7 +63,7 @@ const FreeBlock = struct { fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize { @setCold(true); for (self.data, 0..) |segment, i| { - const spills_into_next = @bitCast(i128, segment) < 0; + const spills_into_next = @as(i128, @bitCast(segment)) < 0; const has_enough_bits = @popCount(segment) >= num_pages; if (!spills_into_next and !has_enough_bits) continue; @@ -109,7 +109,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { if (len > maxInt(usize) - (mem.page_size - 1)) return null; const page_count = nPages(len); const page_idx = allocPages(page_count, log2_align) catch return null; - return @ptrFromInt([*]u8, page_idx * mem.page_size); + return @as([*]u8, @ptrFromInt(page_idx * mem.page_size)); } fn allocPages(page_count: usize, log2_align: u8) !usize { @@ -129,7 +129,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize { const next_page_addr = next_page_idx * mem.page_size; const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align); const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size); - const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count)); + const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count))); 
if (result <= 0) return error.OutOfMemory; assert(result == next_page_idx); @@ -137,7 +137,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize { if (drop_page_count > 0) { freePages(next_page_idx, aligned_page_idx); } - return @intCast(usize, aligned_page_idx); + return @as(usize, @intCast(aligned_page_idx)); } fn freePages(start: usize, end: usize) void { @@ -151,7 +151,7 @@ fn freePages(start: usize, end: usize) void { // TODO: would it be better if we use the first page instead? new_end -= 1; - extended.data = @ptrFromInt([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)]; + extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)]; // Since this is the first page being freed and we consume it, assume *nothing* is free. @memset(extended.data, PageStatus.none_free); } diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index a8d6641d8d12..d547987f63ec 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -48,7 +48,7 @@ pub const ArenaAllocator = struct { // this has to occur before the free because the free frees node const next_it = node.next; const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - const alloc_buf = @ptrCast([*]u8, node)[0..node.data]; + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); it = next_it; } @@ -128,7 +128,7 @@ pub const ArenaAllocator = struct { const next_it = node.next; if (next_it == null) break node; - const alloc_buf = @ptrCast([*]u8, node)[0..node.data]; + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); it = next_it; } else null; @@ -140,7 +140,7 @@ pub const ArenaAllocator = struct { // perfect, no need to invoke the child_allocator if (first_node.data == total_size) return true; - const first_alloc_buf = @ptrCast([*]u8, 
first_node)[0..first_node.data]; + const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data]; if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) { // successful resize first_node.data = total_size; @@ -151,7 +151,7 @@ pub const ArenaAllocator = struct { return false; }; self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress()); - const node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), new_ptr)); + const node: *BufNode = @ptrCast(@alignCast(new_ptr)); node.* = .{ .data = total_size }; self.state.buffer_list.first = node; } @@ -166,7 +166,7 @@ pub const ArenaAllocator = struct { const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse return null; - const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr)); + const buf_node: *BufNode = @ptrCast(@alignCast(ptr)); buf_node.* = .{ .data = len }; self.state.buffer_list.prepend(buf_node); self.state.end_index = 0; @@ -174,16 +174,16 @@ pub const ArenaAllocator = struct { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); _ = ra; - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); var cur_node = if (self.state.buffer_list.first) |first_node| first_node else (self.createNode(0, n + ptr_align) orelse return null); while (true) { - const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; + const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index; const adjusted_addr = mem.alignForward(usize, addr, 
ptr_align); @@ -208,12 +208,12 @@ pub const ArenaAllocator = struct { } fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); _ = log2_buf_align; _ = ret_addr; const cur_node = self.state.buffer_list.first orelse return false; - const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data]; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) { // It's not the most recent allocation, so it cannot be expanded, // but it's fine if they want to make it smaller. @@ -235,10 +235,10 @@ pub const ArenaAllocator = struct { _ = log2_buf_align; _ = ret_addr; - const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx)); + const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); const cur_node = self.state.buffer_list.first orelse return; - const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data]; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) { self.state.end_index -= buf.len; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 98375c850eed..11f7d9dd271a 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -250,7 +250,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { used_count: SlotIndex, fn usedBits(bucket: *BucketHeader, index: usize) *u8 { - return @ptrFromInt(*u8, @intFromPtr(bucket) + @sizeOf(BucketHeader) + index); + return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index)); } fn stackTracePtr( @@ -259,10 +259,10 @@ pub fn 
GeneralPurposeAllocator(comptime config: Config) type { slot_index: SlotIndex, trace_kind: TraceKind, ) *[stack_n]usize { - const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class); + const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class); const addr = start_ptr + one_trace_size * traces_per_slot * slot_index + @intFromEnum(trace_kind) * @as(usize, one_trace_size); - return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr)); + return @ptrCast(@alignCast(addr)); } fn captureStackTrace( @@ -338,9 +338,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (used_byte != 0) { var bit_index: u3 = 0; while (true) : (bit_index += 1) { - const is_used = @truncate(u1, used_byte >> bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0; if (is_used) { - const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index); + const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index)); const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc); const addr = bucket.page + slot_index * size_class; log.err("memory address 0x{x} leaked: {}", .{ @@ -361,7 +361,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var leaks = false; for (self.buckets, 0..) 
|optional_bucket, bucket_i| { const first_bucket = optional_bucket orelse continue; - const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i); + const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i)); const used_bits_count = usedBitsCount(size_class); var bucket = first_bucket; while (true) { @@ -385,7 +385,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void { const bucket_size = bucketSize(size_class); - const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size]; + const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size]; self.backing_allocator.free(bucket_slice); } @@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.small_allocations.deinit(self.backing_allocator); } self.* = undefined; - return @enumFromInt(Check, @intFromBool(leaks)); + return @as(Check, @enumFromInt(@intFromBool(leaks))); } fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void { @@ -496,7 +496,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { bucket.alloc_cursor += 1; var used_bits_byte = bucket.usedBits(slot_index / 8); - const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary + const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary used_bits_byte.* |= (@as(u8, 1) << used_bit_index); bucket.used_count += 1; bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc); @@ -667,8 +667,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { new_size: usize, ret_addr: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); - const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8); + const self: *Self = @ptrCast(@alignCast(ctx)); + const log2_old_align = @as(Allocator.Log2Align, 
@intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -704,11 +704,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr); }; const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page); - const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class)); const used_byte_index = slot_index / 8; - const used_bit_index = @intCast(u3, slot_index % 8); + const used_bit_index = @as(u3, @intCast(slot_index % 8)); const used_byte = bucket.usedBits(used_byte_index); - const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0; if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); @@ -739,8 +739,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } if (log2_old_align != entry.value_ptr.log2_ptr_align) { log.err("Allocation alignment {d} does not match resize alignment {d}. 
Allocation: {} Resize: {}", .{ - @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align), - @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)), bucketStackTrace(bucket, size_class, slot_index, .alloc), free_stack_trace, }); @@ -786,8 +786,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { log2_old_align_u8: u8, ret_addr: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); - const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8); + const self: *Self = @ptrCast(@alignCast(ctx)); + const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -825,11 +825,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return; }; const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page); - const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class)); const used_byte_index = slot_index / 8; - const used_bit_index = @intCast(u3, slot_index % 8); + const used_bit_index = @as(u3, @intCast(slot_index % 8)); const used_byte = bucket.usedBits(used_byte_index); - const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0; if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); @@ -861,8 +861,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } if (log2_old_align != entry.value_ptr.log2_ptr_align) { log.err("Allocation alignment {d} does not match free alignment {d}. 
Allocation: {} Free: {}", .{ - @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align), - @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)), + @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)), bucketStackTrace(bucket, size_class, slot_index, .alloc), free_stack_trace, }); @@ -896,7 +896,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } else { // move alloc_cursor to end so we can tell size_class later const slot_count = @divExact(page_size, size_class); - bucket.alloc_cursor = @truncate(SlotIndex, slot_count); + bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count)); if (self.empty_buckets) |prev_bucket| { // empty_buckets is ordered newest to oldest through prev so that if // config.never_unmap is false and backing_allocator reuses freed memory @@ -936,11 +936,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); if (!self.isAllocationAllowed(len)) return null; - return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null; + return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null; } fn allocInner( @@ -949,7 +949,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { log2_ptr_align: Allocator.Log2Align, ret_addr: usize, ) Allocator.Error![*]u8 { - const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align)); + const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align))); if (new_aligned_size > largest_bucket_object_size) { try 
self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse @@ -1002,7 +1002,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const bucket_size = bucketSize(size_class); const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size); - const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr); + const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr)); ptr.* = BucketHeader{ .prev = ptr, .next = ptr, diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index b2d83c416b59..b5c86c9bebbf 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -34,7 +34,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.writer.print("alloc : {}", .{len}) catch {}; const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra); if (result != null) { @@ -52,7 +52,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (new_len <= buf.len) { self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { @@ -77,7 +77,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { log2_buf_align: u8, ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.writer.print("free : {}\n", .{buf.len}) catch {}; self.parent_allocator.rawFree(buf, log2_buf_align, ra); } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 0d32b5405e4a..6924a284e3b0 100644 --- 
a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -59,7 +59,7 @@ pub fn ScopedLoggingAllocator( log2_ptr_align: u8, ra: usize, ) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra); if (result != null) { logHelper( @@ -84,7 +84,7 @@ pub fn ScopedLoggingAllocator( new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) { if (new_len <= buf.len) { logHelper( @@ -118,7 +118,7 @@ pub fn ScopedLoggingAllocator( log2_buf_align: u8, ra: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); self.parent_allocator.rawFree(buf, log2_buf_align, ra); logHelper(success_log_level, "free - len: {}", .{buf.len}); } diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index 3fc7dfbfca88..b56a15d006ad 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -70,7 +70,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type var i: usize = 0; while (i < initial_size) : (i += 1) { const raw_mem = try pool.allocNew(); - const free_node = @ptrCast(NodePtr, raw_mem); + const free_node = @as(NodePtr, @ptrCast(raw_mem)); free_node.* = Node{ .next = pool.free_list, }; @@ -106,11 +106,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pool.free_list = item.next; break :blk item; } else if (pool_options.growable) - @ptrCast(NodePtr, try pool.allocNew()) + @as(NodePtr, @ptrCast(try pool.allocNew())) else return error.OutOfMemory; - const ptr = @ptrCast(ItemPtr, node); + const ptr = @as(ItemPtr, @ptrCast(node)); ptr.* = undefined; return ptr; } @@ -120,7 +120,7 @@ pub fn 
MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pub fn destroy(pool: *Pool, ptr: ItemPtr) void { ptr.* = undefined; - const node = @ptrCast(NodePtr, ptr); + const node = @as(NodePtr, @ptrCast(ptr)); node.* = Node{ .next = pool.free_list, }; diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 942ff4904d9d..db576e72b2e0 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -187,7 +187,7 @@ pub const Connection = struct { const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1); if (nread == 0) return error.EndOfStream; conn.read_start = 0; - conn.read_end = @intCast(u16, nread); + conn.read_end = @as(u16, @intCast(nread)); } pub fn peek(conn: *Connection) []const u8 { @@ -208,8 +208,8 @@ pub const Connection = struct { if (available_read > available_buffer) { // partially read buffered data @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]); - out_index += @intCast(u16, available_buffer); - conn.read_start += @intCast(u16, available_buffer); + out_index += @as(u16, @intCast(available_buffer)); + conn.read_start += @as(u16, @intCast(available_buffer)); break; } else if (available_read > 0) { // fully read buffered data @@ -343,7 +343,7 @@ pub const Response = struct { else => return error.HttpHeadersInvalid, }; if (first_line[8] != ' ') return error.HttpHeadersInvalid; - const status = @enumFromInt(http.Status, parseInt3(first_line[9..12].*)); + const status = @as(http.Status, @enumFromInt(parseInt3(first_line[9..12].*))); const reason = mem.trimLeft(u8, first_line[12..], " "); res.version = version; @@ -415,7 +415,7 @@ pub const Response = struct { } inline fn int64(array: *const [8]u8) u64 { - return @bitCast(u64, array.*); + return @as(u64, @bitCast(array.*)); } fn parseInt3(nnn: @Vector(3, u8)) u10 { @@ -649,7 +649,7 @@ pub const Request = struct { try req.connection.?.data.fill(); const nchecked = try 
req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek()); - req.connection.?.data.drop(@intCast(u16, nchecked)); + req.connection.?.data.drop(@as(u16, @intCast(nchecked))); if (req.response.parser.state.isContent()) break; } @@ -768,7 +768,7 @@ pub const Request = struct { try req.connection.?.data.fill(); const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek()); - req.connection.?.data.drop(@intCast(u16, nchecked)); + req.connection.?.data.drop(@as(u16, @intCast(nchecked))); } if (has_trail) { diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index fe57b5735d14..8c8661ee21d2 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -46,7 +46,7 @@ pub const Connection = struct { const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1); if (nread == 0) return error.EndOfStream; conn.read_start = 0; - conn.read_end = @intCast(u16, nread); + conn.read_end = @as(u16, @intCast(nread)); } pub fn peek(conn: *Connection) []const u8 { @@ -67,8 +67,8 @@ pub const Connection = struct { if (available_read > available_buffer) { // partially read buffered data @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]); - out_index += @intCast(u16, available_buffer); - conn.read_start += @intCast(u16, available_buffer); + out_index += @as(u16, @intCast(available_buffer)); + conn.read_start += @as(u16, @intCast(available_buffer)); break; } else if (available_read > 0) { // fully read buffered data @@ -268,7 +268,7 @@ pub const Request = struct { } inline fn int64(array: *const [8]u8) u64 { - return @bitCast(u64, array.*); + return @as(u64, @bitCast(array.*)); } method: http.Method, @@ -493,7 +493,7 @@ pub const Response = struct { try res.connection.fill(); const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); - res.connection.drop(@intCast(u16, nchecked)); + res.connection.drop(@as(u16, 
@intCast(nchecked))); if (res.request.parser.state.isContent()) break; } @@ -560,7 +560,7 @@ pub const Response = struct { try res.connection.fill(); const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); - res.connection.drop(@intCast(u16, nchecked)); + res.connection.drop(@as(u16, @intCast(nchecked))); } if (has_trail) { diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index 6bafb084831c..604267bf1626 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -83,7 +83,7 @@ pub const HeadersParser = struct { /// first byte of content is located at `bytes[result]`. pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); - const len = @intCast(u32, bytes.len); + const len = @as(u32, @intCast(bytes.len)); var index: u32 = 0; while (true) { @@ -182,8 +182,8 @@ pub const HeadersParser = struct { const chunk = bytes[index..][0..vector_len]; const v: Vector = chunk.*; - const matches_r = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\r'))); - const matches_n = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\n'))); + const matches_r = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\r')))); + const matches_n = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\n')))); const matches_or: SizeVector = matches_r | matches_n; const matches = @reduce(.Add, matches_or); @@ -234,7 +234,7 @@ pub const HeadersParser = struct { }, 4...vector_len => { inline for (0..vector_len - 3) |i_usize| { - const i = @truncate(u32, i_usize); + const i = @as(u32, @truncate(i_usize)); const b32 = int32(chunk[i..][0..4]); const b16 = intShift(u16, b32); @@ -405,10 +405,10 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in the `chunk_data` state /// and that the first byte of the chunk is at `bytes[result]`. 
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 { - const len = @intCast(u32, bytes.len); + const len = @as(u32, @intCast(bytes.len)); for (bytes[0..], 0..) |c, i| { - const index = @intCast(u32, i); + const index = @as(u32, @intCast(i)); switch (r.state) { .chunk_data_suffix => switch (c) { '\r' => r.state = .chunk_data_suffix_r, @@ -529,7 +529,7 @@ pub const HeadersParser = struct { try conn.fill(); const nread = @min(conn.peek().len, data_avail); - conn.drop(@intCast(u16, nread)); + conn.drop(@as(u16, @intCast(nread))); r.next_chunk_length -= nread; if (r.next_chunk_length == 0) r.done = true; @@ -538,7 +538,7 @@ pub const HeadersParser = struct { } else { const out_avail = buffer.len; - const can_read = @intCast(usize, @min(data_avail, out_avail)); + const can_read = @as(usize, @intCast(@min(data_avail, out_avail))); const nread = try conn.read(buffer[0..can_read]); r.next_chunk_length -= nread; @@ -551,7 +551,7 @@ pub const HeadersParser = struct { try conn.fill(); const i = r.findChunkedLen(conn.peek()); - conn.drop(@intCast(u16, i)); + conn.drop(@as(u16, @intCast(i))); switch (r.state) { .invalid => return error.HttpChunkInvalid, @@ -579,10 +579,10 @@ pub const HeadersParser = struct { try conn.fill(); const nread = @min(conn.peek().len, data_avail); - conn.drop(@intCast(u16, nread)); + conn.drop(@as(u16, @intCast(nread))); r.next_chunk_length -= nread; } else if (out_avail > 0) { - const can_read = @intCast(usize, @min(data_avail, out_avail)); + const can_read: usize = @intCast(@min(data_avail, out_avail)); const nread = try conn.read(buffer[out_index..][0..can_read]); r.next_chunk_length -= nread; out_index += nread; @@ -601,21 +601,21 @@ pub const HeadersParser = struct { }; inline fn int16(array: *const [2]u8) u16 { - return @bitCast(u16, array.*); + return @as(u16, @bitCast(array.*)); } inline fn int24(array: *const [3]u8) u24 { - return @bitCast(u24, array.*); + return @as(u24, @bitCast(array.*)); } inline fn int32(array: *const [4]u8) 
u32 { - return @bitCast(u32, array.*); + return @as(u32, @bitCast(array.*)); } inline fn intShift(comptime T: type, x: anytype) T { switch (@import("builtin").cpu.arch.endian()) { - .Little => return @truncate(T, x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T))), - .Big => return @truncate(T, x), + .Little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))), + .Big => return @as(T, @truncate(x)), } } @@ -634,7 +634,7 @@ const MockBufferedConnection = struct { const nread = try conn.conn.read(conn.buf[0..]); if (nread == 0) return error.EndOfStream; conn.start = 0; - conn.end = @truncate(u16, nread); + conn.end = @as(u16, @truncate(nread)); } pub fn peek(conn: *MockBufferedConnection) []const u8 { @@ -652,7 +652,7 @@ const MockBufferedConnection = struct { const left = buffer.len - out_index; if (available > 0) { - const can_read = @truncate(u16, @min(available, left)); + const can_read = @as(u16, @truncate(@min(available, left))); @memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]); out_index += can_read; @@ -705,8 +705,8 @@ test "HeadersParser.findHeadersEnd" { for (0..36) |i| { r = HeadersParser.initDynamic(0); - try std.testing.expectEqual(@intCast(u32, i), r.findHeadersEnd(data[0..i])); - try std.testing.expectEqual(@intCast(u32, 35 - i), r.findHeadersEnd(data[i..])); + try std.testing.expectEqual(@as(u32, @intCast(i)), r.findHeadersEnd(data[0..i])); + try std.testing.expectEqual(@as(u32, @intCast(35 - i)), r.findHeadersEnd(data[i..])); } } @@ -761,7 +761,7 @@ test "HeadersParser.read length" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } @@ -792,7 +792,7 @@ test "HeadersParser.read chunked" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, 
@intCast(nchecked))); if (r.state.isContent()) break; } @@ -822,7 +822,7 @@ test "HeadersParser.read chunked trailer" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } @@ -837,7 +837,7 @@ test "HeadersParser.read chunked trailer" { try conn.fill(); const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek()); - conn.drop(@intCast(u16, nchecked)); + conn.drop(@as(u16, @intCast(nchecked))); if (r.state.isContent()) break; } diff --git a/lib/std/io.zig b/lib/std/io.zig index f2804a31075a..e7a4476c0f4c 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -275,7 +275,7 @@ pub fn Poller(comptime StreamEnum: type) type { )) { .pending => { self.windows.active.handles_buf[self.windows.active.count] = handle; - self.windows.active.stream_map[self.windows.active.count] = @enumFromInt(StreamEnum, i); + self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i)); self.windows.active.count += 1; }, .closed => {}, // don't add to the wait_objects list diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig index 4bdb0b91943a..7ea2ff5009b6 100644 --- a/lib/std/io/bit_reader.zig +++ b/lib/std/io/bit_reader.zig @@ -60,7 +60,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type) var out_buffer = @as(Buf, 0); if (self.bit_count > 0) { - const n = if (self.bit_count >= bits) @intCast(u3, bits) else self.bit_count; + const n = if (self.bit_count >= bits) @as(u3, @intCast(bits)) else self.bit_count; const shift = u7_bit_count - n; switch (endian) { .Big => { @@ -88,45 +88,45 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type) while (out_bits.* < bits) { const n = bits - out_bits.*; const next_byte = self.forward_reader.readByte() catch |err| switch (err) { - error.EndOfStream => return @intCast(U, 
out_buffer), + error.EndOfStream => return @as(U, @intCast(out_buffer)), else => |e| return e, }; switch (endian) { .Big => { if (n >= u8_bit_count) { - out_buffer <<= @intCast(u3, u8_bit_count - 1); + out_buffer <<= @as(u3, @intCast(u8_bit_count - 1)); out_buffer <<= 1; out_buffer |= @as(Buf, next_byte); out_bits.* += u8_bit_count; continue; } - const shift = @intCast(u3, u8_bit_count - n); - out_buffer <<= @intCast(BufShift, n); + const shift = @as(u3, @intCast(u8_bit_count - n)); + out_buffer <<= @as(BufShift, @intCast(n)); out_buffer |= @as(Buf, next_byte >> shift); out_bits.* += n; - self.bit_buffer = @truncate(u7, next_byte << @intCast(u3, n - 1)); + self.bit_buffer = @as(u7, @truncate(next_byte << @as(u3, @intCast(n - 1)))); self.bit_count = shift; }, .Little => { if (n >= u8_bit_count) { - out_buffer |= @as(Buf, next_byte) << @intCast(BufShift, out_bits.*); + out_buffer |= @as(Buf, next_byte) << @as(BufShift, @intCast(out_bits.*)); out_bits.* += u8_bit_count; continue; } - const shift = @intCast(u3, u8_bit_count - n); + const shift = @as(u3, @intCast(u8_bit_count - n)); const value = (next_byte << shift) >> shift; - out_buffer |= @as(Buf, value) << @intCast(BufShift, out_bits.*); + out_buffer |= @as(Buf, value) << @as(BufShift, @intCast(out_bits.*)); out_bits.* += n; - self.bit_buffer = @truncate(u7, next_byte >> @intCast(u3, n)); + self.bit_buffer = @as(u7, @truncate(next_byte >> @as(u3, @intCast(n)))); self.bit_count = shift; }, } } - return @intCast(U, out_buffer); + return @as(U, @intCast(out_buffer)); } pub fn alignToByte(self: *Self) void { diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig index 0be2e7ab08fc..ef8f007264d0 100644 --- a/lib/std/io/bit_writer.zig +++ b/lib/std/io/bit_writer.zig @@ -47,27 +47,27 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) const Buf = std.meta.Int(.unsigned, buf_bit_count); const BufShift = math.Log2Int(Buf); - const buf_value = @intCast(Buf, value); + const 
buf_value = @as(Buf, @intCast(value)); - const high_byte_shift = @intCast(BufShift, buf_bit_count - u8_bit_count); + const high_byte_shift = @as(BufShift, @intCast(buf_bit_count - u8_bit_count)); var in_buffer = switch (endian) { - .Big => buf_value << @intCast(BufShift, buf_bit_count - bits), + .Big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)), .Little => buf_value, }; var in_bits = bits; if (self.bit_count > 0) { const bits_remaining = u8_bit_count - self.bit_count; - const n = @intCast(u3, if (bits_remaining > bits) bits else bits_remaining); + const n = @as(u3, @intCast(if (bits_remaining > bits) bits else bits_remaining)); switch (endian) { .Big => { - const shift = @intCast(BufShift, high_byte_shift + self.bit_count); - const v = @intCast(u8, in_buffer >> shift); + const shift = @as(BufShift, @intCast(high_byte_shift + self.bit_count)); + const v = @as(u8, @intCast(in_buffer >> shift)); self.bit_buffer |= v; in_buffer <<= n; }, .Little => { - const v = @truncate(u8, in_buffer) << @intCast(u3, self.bit_count); + const v = @as(u8, @truncate(in_buffer)) << @as(u3, @intCast(self.bit_count)); self.bit_buffer |= v; in_buffer >>= n; }, @@ -87,15 +87,15 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) while (in_bits >= u8_bit_count) { switch (endian) { .Big => { - const v = @intCast(u8, in_buffer >> high_byte_shift); + const v = @as(u8, @intCast(in_buffer >> high_byte_shift)); try self.forward_writer.writeByte(v); - in_buffer <<= @intCast(u3, u8_bit_count - 1); + in_buffer <<= @as(u3, @intCast(u8_bit_count - 1)); in_buffer <<= 1; }, .Little => { - const v = @truncate(u8, in_buffer); + const v = @as(u8, @truncate(in_buffer)); try self.forward_writer.writeByte(v); - in_buffer >>= @intCast(u3, u8_bit_count - 1); + in_buffer >>= @as(u3, @intCast(u8_bit_count - 1)); in_buffer >>= 1; }, } @@ -103,10 +103,10 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) } if (in_bits > 0) { - 
self.bit_count = @intCast(u4, in_bits); + self.bit_count = @as(u4, @intCast(in_bits)); self.bit_buffer = switch (endian) { - .Big => @truncate(u8, in_buffer >> high_byte_shift), - .Little => @truncate(u8, in_buffer), + .Big => @as(u8, @truncate(in_buffer >> high_byte_shift)), + .Little => @as(u8, @truncate(in_buffer)), }; } } diff --git a/lib/std/io/c_writer.zig b/lib/std/io/c_writer.zig index 62c73d371453..ee87a28dc6c7 100644 --- a/lib/std/io/c_writer.zig +++ b/lib/std/io/c_writer.zig @@ -13,7 +13,7 @@ pub fn cWriter(c_file: *std.c.FILE) CWriter { fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize { const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file); if (amt_written >= 0) return amt_written; - switch (@enumFromInt(os.E, std.c._errno().*)) { + switch (@as(os.E, @enumFromInt(std.c._errno().*))) { .SUCCESS => unreachable, .INVAL => unreachable, .FAULT => unreachable, diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index abdca56d3c55..4dde51838b15 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -246,7 +246,7 @@ pub fn Reader( /// Same as `readByte` except the returned byte is signed. pub fn readByteSigned(self: Self) (Error || error{EndOfStream})!i8 { - return @bitCast(i8, try self.readByte()); + return @as(i8, @bitCast(try self.readByte())); } /// Reads exactly `num_bytes` bytes and returns as an array. diff --git a/lib/std/json/scanner.zig b/lib/std/json/scanner.zig index 4fb7c1da0145..274faba2ff60 100644 --- a/lib/std/json/scanner.zig +++ b/lib/std/json/scanner.zig @@ -193,7 +193,7 @@ pub const TokenType = enum { /// to get meaningful information from this. pub const Diagnostics = struct { line_number: u64 = 1, - line_start_cursor: usize = @bitCast(usize, @as(isize, -1)), // Start just "before" the input buffer to get a 1-based column for line 1. + line_start_cursor: usize = @as(usize, @bitCast(@as(isize, -1))), // Start just "before" the input buffer to get a 1-based column for line 1. 
total_bytes_before_current_input: u64 = 0, cursor_pointer: *const usize = undefined, @@ -1719,7 +1719,7 @@ const BitStack = struct { pub fn push(self: *@This(), b: u1) Allocator.Error!void { const byte_index = self.bit_len >> 3; - const bit_index = @intCast(u3, self.bit_len & 7); + const bit_index = @as(u3, @intCast(self.bit_len & 7)); if (self.bytes.items.len <= byte_index) { try self.bytes.append(0); @@ -1733,8 +1733,8 @@ const BitStack = struct { pub fn peek(self: *const @This()) u1 { const byte_index = (self.bit_len - 1) >> 3; - const bit_index = @intCast(u3, (self.bit_len - 1) & 7); - return @intCast(u1, (self.bytes.items[byte_index] >> bit_index) & 1); + const bit_index = @as(u3, @intCast((self.bit_len - 1) & 7)); + return @as(u1, @intCast((self.bytes.items[byte_index] >> bit_index) & 1)); } pub fn pop(self: *@This()) u1 { diff --git a/lib/std/json/static.zig b/lib/std/json/static.zig index fd3d12d73a6d..f1926660f3a1 100644 --- a/lib/std/json/static.zig +++ b/lib/std/json/static.zig @@ -442,7 +442,7 @@ fn internalParse( } if (ptrInfo.sentinel) |some| { - const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*; + const sentinel_value = @as(*align(1) const ptrInfo.child, @ptrCast(some)).*; return try arraylist.toOwnedSliceSentinel(sentinel_value); } @@ -456,7 +456,7 @@ fn internalParse( // Use our own array list so we can append the sentinel. 
var value_list = ArrayList(u8).init(allocator); _ = try source.allocNextIntoArrayList(&value_list, .alloc_always); - return try value_list.toOwnedSliceSentinel(@ptrCast(*const u8, sentinel_ptr).*); + return try value_list.toOwnedSliceSentinel(@as(*const u8, @ptrCast(sentinel_ptr)).*); } if (ptrInfo.is_const) { switch (try source.nextAllocMax(allocator, .alloc_if_needed, options.max_value_len.?)) { @@ -518,8 +518,8 @@ fn internalParseFromValue( }, .Float, .ComptimeFloat => { switch (source) { - .float => |f| return @floatCast(T, f), - .integer => |i| return @floatFromInt(T, i), + .float => |f| return @as(T, @floatCast(f)), + .integer => |i| return @as(T, @floatFromInt(i)), .number_string, .string => |s| return std.fmt.parseFloat(T, s), else => return error.UnexpectedToken, } @@ -530,12 +530,12 @@ fn internalParseFromValue( if (@round(f) != f) return error.InvalidNumber; if (f > std.math.maxInt(T)) return error.Overflow; if (f < std.math.minInt(T)) return error.Overflow; - return @intFromFloat(T, f); + return @as(T, @intFromFloat(f)); }, .integer => |i| { if (i > std.math.maxInt(T)) return error.Overflow; if (i < std.math.minInt(T)) return error.Overflow; - return @intCast(T, i); + return @as(T, @intCast(i)); }, .number_string, .string => |s| { return sliceToInt(T, s); @@ -686,7 +686,7 @@ fn internalParseFromValue( switch (source) { .array => |array| { const r = if (ptrInfo.sentinel) |sentinel_ptr| - try allocator.allocSentinel(ptrInfo.child, array.items.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*) + try allocator.allocSentinel(ptrInfo.child, array.items.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*) else try allocator.alloc(ptrInfo.child, array.items.len); @@ -701,7 +701,7 @@ fn internalParseFromValue( // Dynamic length string. 
const r = if (ptrInfo.sentinel) |sentinel_ptr| - try allocator.allocSentinel(ptrInfo.child, s.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*) + try allocator.allocSentinel(ptrInfo.child, s.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*) else try allocator.alloc(ptrInfo.child, s.len); @memcpy(r[0..], s); @@ -743,7 +743,7 @@ fn sliceToInt(comptime T: type, slice: []const u8) !T { const float = try std.fmt.parseFloat(f128, slice); if (@round(float) != float) return error.InvalidNumber; if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow; - return @intCast(T, @intFromFloat(i128, float)); + return @as(T, @intCast(@as(i128, @intFromFloat(float)))); } fn sliceToEnum(comptime T: type, slice: []const u8) !T { @@ -759,7 +759,7 @@ fn fillDefaultStructValues(comptime T: type, r: *T, fields_seen: *[@typeInfo(T). inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { if (!fields_seen[i]) { if (field.default_value) |default_ptr| { - const default = @ptrCast(*align(1) const field.type, default_ptr).*; + const default = @as(*align(1) const field.type, @ptrCast(default_ptr)).*; @field(r, field.name) = default; } else { return error.MissingField; diff --git a/lib/std/json/stringify.zig b/lib/std/json/stringify.zig index 6d10e9533025..5de5db54b9d7 100644 --- a/lib/std/json/stringify.zig +++ b/lib/std/json/stringify.zig @@ -78,8 +78,8 @@ fn outputUnicodeEscape( assert(codepoint <= 0x10FFFF); // To escape an extended character that is not in the Basic Multilingual Plane, // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair. 
- const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; try out_stream.writeAll("\\u"); try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream); try out_stream.writeAll("\\u"); diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index 760bad13fdd3..3a2750f5a150 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -176,8 +176,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { .ComptimeInt => { return self.emitNumber(@as(std.math.IntFittingRange(value, value), value)); }, - .Float, .ComptimeFloat => if (@floatCast(f64, value) == value) { - try self.stream.print("{}", .{@floatCast(f64, value)}); + .Float, .ComptimeFloat => if (@as(f64, @floatCast(value)) == value) { + try self.stream.print("{}", .{@as(f64, @floatCast(value))}); self.popState(); return; }, @@ -294,7 +294,7 @@ test "json write stream" { fn getJsonObject(allocator: std.mem.Allocator) !Value { var value = Value{ .object = ObjectMap.init(allocator) }; - try value.object.put("one", Value{ .integer = @intCast(i64, 1) }); + try value.object.put("one", Value{ .integer = @as(i64, @intCast(1)) }); try value.object.put("two", Value{ .float = 2.0 }); return value; } diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig index 859d753a6a74..33555caec58e 100644 --- a/lib/std/leb128.zig +++ b/lib/std/leb128.zig @@ -30,17 +30,17 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T { if (value > std.math.maxInt(T)) return error.Overflow; } - return @truncate(T, value); + return @as(T, @truncate(value)); } /// Write a single unsigned integer as unsigned LEB128 to the given writer. 
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, uint_value); + var value = @as(U, @intCast(uint_value)); while (true) { - const byte = @truncate(u8, value & 0x7f); + const byte = @as(u8, @truncate(value & 0x7f)); value >>= 7; if (value == 0) { try writer.writeByte(byte); @@ -71,18 +71,18 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { if (ov[1] != 0) { // Overflow is ok so long as the sign bit is set and this is the last byte if (byte & 0x80 != 0) return error.Overflow; - if (@bitCast(S, ov[0]) >= 0) return error.Overflow; + if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow; // and all the overflowed bits are 1 - const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift)); - const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift; + const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); + const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } else { // If we don't overflow and this is the last byte and the number being decoded // is negative, check that the remaining bits are 1 - if ((byte & 0x80 == 0) and (@bitCast(S, ov[0]) < 0)) { - const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift)); - const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift; + if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) { + const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); + const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } } @@ -92,7 +92,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { const needs_sign_ext = group + 1 < max_group; if (byte & 0x40 != 0 and needs_sign_ext) { const ones = @as(S, -1); - value |= @bitCast(U, ones) << (shift + 7); + value 
|= @as(U, @bitCast(ones)) << (shift + 7); } break; } @@ -100,13 +100,13 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T { return error.Overflow; } - const result = @bitCast(S, value); + const result = @as(S, @bitCast(value)); // Only applies if we extended to i8 if (S != T) { if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow; } - return @truncate(T, result); + return @as(T, @truncate(result)); } /// Write a single signed integer as signed LEB128 to the given writer. @@ -115,11 +115,11 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const S = if (@typeInfo(T).Int.bits < 8) i8 else T; const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits); - var value = @intCast(S, int_value); + var value = @as(S, @intCast(int_value)); while (true) { - const uvalue = @bitCast(U, value); - const byte = @truncate(u8, uvalue); + const uvalue = @as(U, @bitCast(value)); + const byte = @as(u8, @truncate(uvalue)); value >>= 6; if (value == -1 or value == 0) { try writer.writeByte(byte & 0x7F); @@ -141,15 +141,15 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void { pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void { const T = @TypeOf(int); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, int); + var value = @as(U, @intCast(int)); comptime var i = 0; inline while (i < (l - 1)) : (i += 1) { - const byte = @truncate(u8, value) | 0b1000_0000; + const byte = @as(u8, @truncate(value)) | 0b1000_0000; value >>= 7; ptr[i] = byte; } - ptr[i] = @truncate(u8, value); + ptr[i] = @as(u8, @truncate(value)); } test "writeUnsignedFixed" { @@ -245,7 +245,7 @@ test "deserialize signed LEB128" { try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000); - try 
testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000))); + try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000))))); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000); @@ -356,7 +356,7 @@ test "serialize unsigned LEB128" { const max = std.math.maxInt(T); var i = @as(std.meta.Int(.unsigned, @typeInfo(T).Int.bits + 1), min); - while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); + while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } } @@ -374,6 +374,6 @@ test "serialize signed LEB128" { const max = std.math.maxInt(T); var i = @as(std.meta.Int(.signed, @typeInfo(T).Int.bits + 1), min); - while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i)); + while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } } diff --git a/lib/std/macho.zig b/lib/std/macho.zig index 8bddd67023ca..1b886e2d903a 100644 --- a/lib/std/macho.zig +++ b/lib/std/macho.zig @@ -787,7 +787,7 @@ pub const section_64 = extern struct { } pub fn @"type"(sect: section_64) u8 { - return @truncate(u8, sect.flags & 0xff); + return @as(u8, @truncate(sect.flags & 0xff)); } pub fn attrs(sect: section_64) u32 { @@ -1870,7 +1870,7 @@ pub const LoadCommandIterator = struct { pub fn cast(lc: LoadCommand, comptime Cmd: type) ?Cmd { if (lc.data.len < @sizeOf(Cmd)) return null; - return @ptrCast(*const Cmd, @alignCast(@alignOf(Cmd), &lc.data[0])).*; + return @as(*const Cmd, @ptrCast(@alignCast(&lc.data[0]))).*; } /// Asserts LoadCommand is of type segment_command_64. 
@@ -1878,9 +1878,9 @@ pub const LoadCommandIterator = struct { const segment_lc = lc.cast(segment_command_64).?; if (segment_lc.nsects == 0) return &[0]section_64{}; const data = lc.data[@sizeOf(segment_command_64)..]; - const sections = @ptrCast( + const sections = @as( [*]const section_64, - @alignCast(@alignOf(section_64), &data[0]), + @ptrCast(@alignCast(&data[0])), )[0..segment_lc.nsects]; return sections; } @@ -1903,16 +1903,16 @@ pub const LoadCommandIterator = struct { pub fn next(it: *LoadCommandIterator) ?LoadCommand { if (it.index >= it.ncmds) return null; - const hdr = @ptrCast( + const hdr = @as( *const load_command, - @alignCast(@alignOf(load_command), &it.buffer[0]), + @ptrCast(@alignCast(&it.buffer[0])), ).*; const cmd = LoadCommand{ .hdr = hdr, .data = it.buffer[0..hdr.cmdsize], }; - it.buffer = @alignCast(@alignOf(u64), it.buffer[hdr.cmdsize..]); + it.buffer = @alignCast(it.buffer[hdr.cmdsize..]); it.index += 1; return cmd; diff --git a/lib/std/math.zig b/lib/std/math.zig index c7d354f78792..2a6c24bcb47e 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -85,31 +85,31 @@ pub const inf_f128 = @compileError("Deprecated: use `inf(f128)` instead"); pub const epsilon = @compileError("Deprecated: use `floatEps` instead"); pub const nan_u16 = @as(u16, 0x7C01); -pub const nan_f16 = @bitCast(f16, nan_u16); +pub const nan_f16 = @as(f16, @bitCast(nan_u16)); pub const qnan_u16 = @as(u16, 0x7E00); -pub const qnan_f16 = @bitCast(f16, qnan_u16); +pub const qnan_f16 = @as(f16, @bitCast(qnan_u16)); pub const nan_u32 = @as(u32, 0x7F800001); -pub const nan_f32 = @bitCast(f32, nan_u32); +pub const nan_f32 = @as(f32, @bitCast(nan_u32)); pub const qnan_u32 = @as(u32, 0x7FC00000); -pub const qnan_f32 = @bitCast(f32, qnan_u32); +pub const qnan_f32 = @as(f32, @bitCast(qnan_u32)); pub const nan_u64 = @as(u64, 0x7FF << 52) | 1; -pub const nan_f64 = @bitCast(f64, nan_u64); +pub const nan_f64 = @as(f64, @bitCast(nan_u64)); pub const qnan_u64 = @as(u64, 
0x7ff8000000000000); -pub const qnan_f64 = @bitCast(f64, qnan_u64); +pub const qnan_f64 = @as(f64, @bitCast(qnan_u64)); pub const nan_f80 = make_f80(F80{ .fraction = 0xA000000000000000, .exp = 0x7fff }); pub const qnan_f80 = make_f80(F80{ .fraction = 0xC000000000000000, .exp = 0x7fff }); pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001); -pub const nan_f128 = @bitCast(f128, nan_u128); +pub const nan_f128 = @as(f128, @bitCast(nan_u128)); pub const qnan_u128 = @as(u128, 0x7fff8000000000000000000000000000); -pub const qnan_f128 = @bitCast(f128, qnan_u128); +pub const qnan_f128 = @as(f128, @bitCast(qnan_u128)); pub const nan = @import("math/nan.zig").nan; pub const snan = @import("math/nan.zig").snan; @@ -508,10 +508,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: anytype) T { const C = @typeInfo(T).Vector.child; const len = @typeInfo(T).Vector.len; if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0)); - break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt)); + break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt))); } else { if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0; - break :blk @intCast(Log2Int(T), abs_shift_amt); + break :blk @as(Log2Int(T), @intCast(abs_shift_amt)); } }; @@ -552,10 +552,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: anytype) T { const C = @typeInfo(T).Vector.child; const len = @typeInfo(T).Vector.len; if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0)); - break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt)); + break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt))); } else { if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0; - break :blk @intCast(Log2Int(T), abs_shift_amt); + break :blk @as(Log2Int(T), @intCast(abs_shift_amt)); } }; @@ -596,7 +596,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T { if (@typeInfo(C).Int.signedness == .signed) { @compileError("cannot rotate signed integers"); } - const ar = @intCast(Log2Int(C), 
@mod(r, @typeInfo(C).Int.bits)); + const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits))); return (x >> @splat(@typeInfo(T).Vector.len, ar)) | (x << @splat(@typeInfo(T).Vector.len, 1 + ~ar)); } else if (@typeInfo(T).Int.signedness == .signed) { @compileError("cannot rotate signed integer"); @@ -604,7 +604,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T { if (T == u0) return 0; if (isPowerOfTwo(@typeInfo(T).Int.bits)) { - const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits)); + const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits))); return x >> ar | x << (1 +% ~ar); } else { const ar = @mod(r, @typeInfo(T).Int.bits); @@ -640,7 +640,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T { if (@typeInfo(C).Int.signedness == .signed) { @compileError("cannot rotate signed integers"); } - const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits)); + const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits))); return (x << @splat(@typeInfo(T).Vector.len, ar)) | (x >> @splat(@typeInfo(T).Vector.len, 1 +% ~ar)); } else if (@typeInfo(T).Int.signedness == .signed) { @compileError("cannot rotate signed integer"); @@ -648,7 +648,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T { if (T == u0) return 0; if (isPowerOfTwo(@typeInfo(T).Int.bits)) { - const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits)); + const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits))); return x << ar | x >> 1 +% ~ar; } else { const ar = @mod(r, @typeInfo(T).Int.bits); @@ -1029,9 +1029,9 @@ pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) { if (int_info.signedness == .unsigned) return x; const Uint = std.meta.Int(.unsigned, int_info.bits); if (x < 0) { - return ~@bitCast(Uint, x +% -1); + return ~@as(Uint, @bitCast(x +% -1)); } else { - return @intCast(Uint, x); + return @as(Uint, @intCast(x)); } }, else => unreachable, @@ -1056,7 +1056,7 @@ pub fn negateCast(x: anytype) !std.meta.Int(.signed, 
@bitSizeOf(@TypeOf(x))) { if (x == -minInt(int)) return minInt(int); - return -@intCast(int, x); + return -@as(int, @intCast(x)); } test "negateCast" { @@ -1080,7 +1080,7 @@ pub fn cast(comptime T: type, x: anytype) ?T { } else if ((is_comptime or minInt(@TypeOf(x)) < minInt(T)) and x < minInt(T)) { return null; } else { - return @intCast(T, x); + return @as(T, @intCast(x)); } } @@ -1102,13 +1102,19 @@ test "cast" { pub const AlignCastError = error{UnalignedMemory}; +fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type { + var ptr_info = @typeInfo(Ptr); + ptr_info.Pointer.alignment = alignment; + return @Type(ptr_info); +} + /// Align cast a pointer but return an error if it's the wrong alignment -pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { +pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) { const addr = @intFromPtr(ptr); if (addr % alignment != 0) { return error.UnalignedMemory; } - return @alignCast(alignment, ptr); + return @alignCast(ptr); } /// Asserts `int > 0`. 
@@ -1172,7 +1178,7 @@ pub inline fn floor(value: anytype) @TypeOf(value) { pub fn floorPowerOfTwo(comptime T: type, value: T) T { const uT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); if (value <= 0) return 0; - return @as(T, 1) << log2_int(uT, @intCast(uT, value)); + return @as(T, 1) << log2_int(uT, @as(uT, @intCast(value))); } test "floorPowerOfTwo" { @@ -1211,7 +1217,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo( assert(value != 0); const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1); const ShiftType = std.math.Log2Int(PromotedType); - return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1)); + return @as(PromotedType, 1) << @as(ShiftType, @intCast(@typeInfo(T).Int.bits - @clz(value - 1))); } /// Returns the next power of two (if the value is not already a power of two). @@ -1227,7 +1233,7 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) { if (overflowBit & x != 0) { return error.Overflow; } - return @intCast(T, x); + return @as(T, @intCast(x)); } /// Returns the next power of two (if the value is not already a power @@ -1277,7 +1283,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) { if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned) @compileError("log2_int requires an unsigned integer, found " ++ @typeName(T)); assert(x != 0); - return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x)); + return @as(Log2Int(T), @intCast(@typeInfo(T).Int.bits - 1 - @clz(x))); } /// Return the log base 2 of integer value x, rounding up to the @@ -1311,8 +1317,8 @@ pub fn lossyCast(comptime T: type, value: anytype) T { switch (@typeInfo(T)) { .Float => { switch (@typeInfo(@TypeOf(value))) { - .Int => return @floatFromInt(T, value), - .Float => return @floatCast(T, value), + .Int => return @as(T, @floatFromInt(value)), + .Float => return @as(T, @floatCast(value)), .ComptimeInt => return @as(T, value), 
.ComptimeFloat => return @as(T, value), else => @compileError("bad type"), @@ -1326,7 +1332,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T { } else if (value <= minInt(T)) { return @as(T, minInt(T)); } else { - return @intCast(T, value); + return @as(T, @intCast(value)); } }, .Float, .ComptimeFloat => { @@ -1335,7 +1341,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T { } else if (value <= minInt(T)) { return @as(T, minInt(T)); } else { - return @intFromFloat(T, value); + return @as(T, @intFromFloat(value)); } }, else => @compileError("bad type"), @@ -1594,7 +1600,7 @@ test "compare between signed and unsigned" { try testing.expect(compare(@as(u8, 255), .gt, @as(i9, -1))); try testing.expect(!compare(@as(u8, 255), .lte, @as(i9, -1))); try testing.expect(compare(@as(u8, 1), .lt, @as(u8, 2))); - try testing.expect(@bitCast(u8, @as(i8, -1)) == @as(u8, 255)); + try testing.expect(@as(u8, @bitCast(@as(i8, -1))) == @as(u8, 255)); try testing.expect(!compare(@as(u8, 255), .eq, @as(i8, -1))); try testing.expect(compare(@as(u8, 1), .eq, @as(u8, 1))); } @@ -1624,7 +1630,7 @@ test "order.compare" { test "compare.reverse" { inline for (@typeInfo(CompareOperator).Enum.fields) |op_field| { - const op = @enumFromInt(CompareOperator, op_field.value); + const op = @as(CompareOperator, @enumFromInt(op_field.value)); try testing.expect(compare(2, op, 3) == compare(3, op.reverse(), 2)); try testing.expect(compare(3, op, 3) == compare(3, op.reverse(), 3)); try testing.expect(compare(4, op, 3) == compare(3, op.reverse(), 4)); @@ -1646,10 +1652,10 @@ pub inline fn boolMask(comptime MaskInt: type, value: bool) MaskInt { if (MaskInt == u1) return @intFromBool(value); if (MaskInt == i1) { // The @as here is a workaround for #7950 - return @bitCast(i1, @as(u1, @intFromBool(value))); + return @as(i1, @bitCast(@as(u1, @intFromBool(value)))); } - return -%@intCast(MaskInt, @intFromBool(value)); + return -%@as(MaskInt, @intCast(@intFromBool(value))); } test "boolMask" { @@ 
-1680,7 +1686,7 @@ test "boolMask" { /// Return the mod of `num` with the smallest integer type pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0, denom - 1) { - return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom)); + return @as(IntFittingRange(0, denom - 1), @intCast(@mod(num, denom))); } pub const F80 = struct { @@ -1690,14 +1696,14 @@ pub const F80 = struct { pub fn make_f80(repr: F80) f80 { const int = (@as(u80, repr.exp) << 64) | repr.fraction; - return @bitCast(f80, int); + return @as(f80, @bitCast(int)); } pub fn break_f80(x: f80) F80 { - const int = @bitCast(u80, x); + const int = @as(u80, @bitCast(x)); return .{ - .fraction = @truncate(u64, int), - .exp = @truncate(u16, int >> 64), + .fraction = @as(u64, @truncate(int)), + .exp = @as(u16, @truncate(int >> 64)), }; } @@ -1709,7 +1715,7 @@ pub inline fn sign(i: anytype) @TypeOf(i) { const T = @TypeOf(i); return switch (@typeInfo(T)) { .Int, .ComptimeInt => @as(T, @intFromBool(i > 0)) - @as(T, @intFromBool(i < 0)), - .Float, .ComptimeFloat => @floatFromInt(T, @intFromBool(i > 0)) - @floatFromInt(T, @intFromBool(i < 0)), + .Float, .ComptimeFloat => @as(T, @floatFromInt(@intFromBool(i > 0))) - @as(T, @floatFromInt(@intFromBool(i < 0))), .Vector => |vinfo| blk: { switch (@typeInfo(vinfo.child)) { .Int, .Float => { diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig index e88bed72277b..1a29ca7b5437 100644 --- a/lib/std/math/acos.zig +++ b/lib/std/math/acos.zig @@ -36,7 +36,7 @@ fn acos32(x: f32) f32 { const pio2_hi = 1.5707962513e+00; const pio2_lo = 7.5497894159e-08; - const hx: u32 = @bitCast(u32, x); + const hx: u32 = @as(u32, @bitCast(x)); const ix: u32 = hx & 0x7FFFFFFF; // |x| >= 1 or nan @@ -72,8 +72,8 @@ fn acos32(x: f32) f32 { // x > 0.5 const z = (1.0 - x) * 0.5; const s = @sqrt(z); - const jx = @bitCast(u32, s); - const df = @bitCast(f32, jx & 0xFFFFF000); + const jx = @as(u32, @bitCast(s)); + const df = @as(f32, @bitCast(jx & 0xFFFFF000)); const c = (z 
- df * df) / (s + df); const w = r32(z) * s + c; return 2 * (df + w); @@ -100,13 +100,13 @@ fn acos64(x: f64) f64 { const pio2_hi: f64 = 1.57079632679489655800e+00; const pio2_lo: f64 = 6.12323399573676603587e-17; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32); + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)); const ix = hx & 0x7FFFFFFF; // |x| >= 1 or nan if (ix >= 0x3FF00000) { - const lx = @intCast(u32, ux & 0xFFFFFFFF); + const lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); // acos(1) = 0, acos(-1) = pi if ((ix - 0x3FF00000) | lx == 0) { @@ -141,8 +141,8 @@ fn acos64(x: f64) f64 { // x > 0.5 const z = (1.0 - x) * 0.5; const s = @sqrt(z); - const jx = @bitCast(u64, s); - const df = @bitCast(f64, jx & 0xFFFFFFFF00000000); + const jx = @as(u64, @bitCast(s)); + const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000)); const c = (z - df * df) / (s + df); const w = r64(z) * s + c; return 2 * (df + w); diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig index a78130d2ef07..0c6de9933eea 100644 --- a/lib/std/math/acosh.zig +++ b/lib/std/math/acosh.zig @@ -24,7 +24,7 @@ pub fn acosh(x: anytype) @TypeOf(x) { // acosh(x) = log(x + sqrt(x * x - 1)) fn acosh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; // |x| < 2, invalid if x < 1 or nan @@ -42,7 +42,7 @@ fn acosh32(x: f32) f32 { } fn acosh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; // |x| < 2, invalid if x < 1 or nan diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig index 48ad04c579cb..ac1d01ff55ad 100644 --- a/lib/std/math/asin.zig +++ b/lib/std/math/asin.zig @@ -36,7 +36,7 @@ fn r32(z: f32) f32 { fn asin32(x: f32) f32 { const pio2 = 1.570796326794896558e+00; - const hx: u32 = @bitCast(u32, x); + const hx: u32 = @as(u32, @bitCast(x)); const ix: u32 = hx & 0x7FFFFFFF; // |x| >= 1 @@ -92,13 +92,13 @@ fn asin64(x: f64) f64 { const pio2_hi: 
f64 = 1.57079632679489655800e+00; const pio2_lo: f64 = 6.12323399573676603587e-17; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32); + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)); const ix = hx & 0x7FFFFFFF; // |x| >= 1 or nan if (ix >= 0x3FF00000) { - const lx = @intCast(u32, ux & 0xFFFFFFFF); + const lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); // asin(1) = +-pi/2 with inexact if ((ix - 0x3FF00000) | lx == 0) { @@ -128,8 +128,8 @@ fn asin64(x: f64) f64 { if (ix >= 0x3FEF3333) { fx = pio2_hi - 2 * (s + s * r); } else { - const jx = @bitCast(u64, s); - const df = @bitCast(f64, jx & 0xFFFFFFFF00000000); + const jx = @as(u64, @bitCast(s)); + const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000)); const c = (z - df * df) / (s + df); fx = 0.5 * pio2_hi - (2 * s * r - (pio2_lo - 2 * c) - (0.5 * pio2_hi - 2 * df)); } diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig index 65028ef5d9dc..13b1045bf608 100644 --- a/lib/std/math/asinh.zig +++ b/lib/std/math/asinh.zig @@ -26,11 +26,11 @@ pub fn asinh(x: anytype) @TypeOf(x) { // asinh(x) = sign(x) * log(|x| + sqrt(x * x + 1)) ~= x - x^3/6 + o(x^5) fn asinh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; const s = i >> 31; - var rx = @bitCast(f32, i); // |x| + var rx = @as(f32, @bitCast(i)); // |x| // TODO: Shouldn't need this explicit check. 
if (math.isNegativeInf(x)) { @@ -58,11 +58,11 @@ fn asinh32(x: f32) f32 { } fn asinh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; const s = e >> 63; - var rx = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x| + var rx = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x| if (math.isNegativeInf(x)) { return x; diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig index 41caae11a6a8..75be6ea7460f 100644 --- a/lib/std/math/atan.zig +++ b/lib/std/math/atan.zig @@ -46,7 +46,7 @@ fn atan32(x_: f32) f32 { }; var x = x_; - var ix: u32 = @bitCast(u32, x); + var ix: u32 = @as(u32, @bitCast(x)); const sign = ix >> 31; ix &= 0x7FFFFFFF; @@ -143,8 +143,8 @@ fn atan64(x_: f64) f64 { }; var x = x_; - var ux = @bitCast(u64, x); - var ix = @intCast(u32, ux >> 32); + var ux = @as(u64, @bitCast(x)); + var ix = @as(u32, @intCast(ux >> 32)); const sign = ix >> 31; ix &= 0x7FFFFFFF; @@ -165,7 +165,7 @@ fn atan64(x_: f64) f64 { // |x| < 2^(-27) if (ix < 0x3E400000) { if (ix < 0x00100000) { - math.doNotOptimizeAway(@floatCast(f32, x)); + math.doNotOptimizeAway(@as(f32, @floatCast(x))); } return x; } @@ -212,7 +212,7 @@ fn atan64(x_: f64) f64 { } test "math.atan" { - try expect(@bitCast(u32, atan(@as(f32, 0.2))) == @bitCast(u32, atan32(0.2))); + try expect(@as(u32, @bitCast(atan(@as(f32, 0.2)))) == @as(u32, @bitCast(atan32(0.2)))); try expect(atan(@as(f64, 0.2)) == atan64(0.2)); } diff --git a/lib/std/math/atan2.zig b/lib/std/math/atan2.zig index b9b37e7da424..026c76b5b2cf 100644 --- a/lib/std/math/atan2.zig +++ b/lib/std/math/atan2.zig @@ -44,8 +44,8 @@ fn atan2_32(y: f32, x: f32) f32 { return x + y; } - var ix = @bitCast(u32, x); - var iy = @bitCast(u32, y); + var ix = @as(u32, @bitCast(x)); + var iy = @as(u32, @bitCast(y)); // x = 1.0 if (ix == 0x3F800000) { @@ -129,13 +129,13 @@ fn atan2_64(y: f64, x: f64) f64 { return x + y; } - var ux = @bitCast(u64, x); - var ix = @intCast(u32, ux >> 32); - var lx = @intCast(u32, ux & 
0xFFFFFFFF); + var ux = @as(u64, @bitCast(x)); + var ix = @as(u32, @intCast(ux >> 32)); + var lx = @as(u32, @intCast(ux & 0xFFFFFFFF)); - var uy = @bitCast(u64, y); - var iy = @intCast(u32, uy >> 32); - var ly = @intCast(u32, uy & 0xFFFFFFFF); + var uy = @as(u64, @bitCast(y)); + var iy = @as(u32, @intCast(uy >> 32)); + var ly = @as(u32, @intCast(uy & 0xFFFFFFFF)); // x = 1.0 if ((ix -% 0x3FF00000) | lx == 0) { diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig index aed5d8bca842..58b56ac8faae 100644 --- a/lib/std/math/atanh.zig +++ b/lib/std/math/atanh.zig @@ -26,11 +26,11 @@ pub fn atanh(x: anytype) @TypeOf(x) { // atanh(x) = log((1 + x) / (1 - x)) / 2 = log1p(2x / (1 - x)) / 2 ~= x + x^3 / 3 + o(x^5) fn atanh_32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const i = u & 0x7FFFFFFF; const s = u >> 31; - var y = @bitCast(f32, i); // |x| + var y = @as(f32, @bitCast(i)); // |x| if (y == 1.0) { return math.copysign(math.inf(f32), x); @@ -55,11 +55,11 @@ fn atanh_32(x: f32) f32 { } fn atanh_64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const e = (u >> 52) & 0x7FF; const s = u >> 63; - var y = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x| + var y = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x| if (y == 1.0) { return math.copysign(math.inf(f64), x); @@ -69,7 +69,7 @@ fn atanh_64(x: f64) f64 { if (e < 0x3FF - 32) { // underflow if (e == 0) { - math.doNotOptimizeAway(@floatCast(f32, y)); + math.doNotOptimizeAway(@as(f32, @floatCast(y))); } } // |x| < 0.5 diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 846a809e0565..213876ccadba 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize { } const w_value = std.math.absCast(scalar); - return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1); + return @as(usize, @intCast(@divFloor(@as(Limb, @intCast(math.log2(w_value))), 
limb_bits) + 1)); } pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize { @@ -87,8 +87,8 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { // r2 = b * c const bc = @as(DoubleLimb, math.mulWide(Limb, b, c)); - const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> limb_bits); + const r2 = @as(Limb, @truncate(bc)); + const c2 = @as(Limb, @truncate(bc >> limb_bits)); // ov2[0] = ov1[0] + r2 const ov2 = @addWithOverflow(ov1[0], r2); @@ -107,8 +107,8 @@ fn subMulLimbWithBorrow(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { // r2 = b * c const bc = @as(DoubleLimb, std.math.mulWide(Limb, b, c)); - const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> limb_bits); + const r2 = @as(Limb, @truncate(bc)); + const c2 = @as(Limb, @truncate(bc >> limb_bits)); // ov2[0] = ov1[0] - r2 const ov2 = @subWithOverflow(ov1[0], r2); @@ -244,7 +244,7 @@ pub const Mutable = struct { } else { var i: usize = 0; while (true) : (i += 1) { - self.limbs[i] = @truncate(Limb, w_value); + self.limbs[i] = @as(Limb, @truncate(w_value)); w_value >>= limb_bits; if (w_value == 0) break; @@ -340,7 +340,7 @@ pub const Mutable = struct { } const req_limbs = calcTwosCompLimbCount(bit_count); - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; // 0b0..010..0 where 1 is the sign bit. const mask = (signmask << 1) -% 1; // 0b0..011..1 where the leftmost 1 is the sign bit. @@ -365,7 +365,7 @@ pub const Mutable = struct { r.set(0); } else { const new_req_limbs = calcTwosCompLimbCount(bit_count - 1); - const msb = @truncate(Log2Limb, bit_count - 2); + const msb = @as(Log2Limb, @truncate(bit_count - 2)); const new_signmask = @as(Limb, 1) << msb; // 0b0..010..0 where 1 is the sign bit. const new_mask = (new_signmask << 1) -% 1; // 0b0..001..1 where the rightmost 0 is the sign bit. 
@@ -1153,7 +1153,7 @@ pub const Mutable = struct { // const msb = @truncate(Log2Limb, checkbit); // const checkmask = (@as(Limb, 1) << msb) -% 1; - if (a.limbs[a.limbs.len - 1] >> @truncate(Log2Limb, checkbit) != 0) { + if (a.limbs[a.limbs.len - 1] >> @as(Log2Limb, @truncate(checkbit)) != 0) { // Need to saturate. r.setTwosCompIntLimit(if (a.positive) .max else .min, signedness, bit_count); return; @@ -1554,7 +1554,7 @@ pub const Mutable = struct { // Optimization for small divisor. By using a half limb we can avoid requiring DoubleLimb // divisions in the hot code path. This may often require compiler_rt software-emulation. if (divisor < maxInt(HalfLimb)) { - lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @intCast(HalfLimb, divisor)); + lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @as(HalfLimb, @intCast(divisor))); } else { lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], divisor); } @@ -1671,7 +1671,7 @@ pub const Mutable = struct { } else { const q0 = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]); const n0 = @as(DoubleLimb, y.limbs[t]); - q.limbs[k] = @intCast(Limb, q0 / n0); + q.limbs[k] = @as(Limb, @intCast(q0 / n0)); } // 3.2 @@ -1750,7 +1750,7 @@ pub const Mutable = struct { return; } - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; const mask = (signmask << 1) -% 1; @@ -1781,7 +1781,7 @@ pub const Mutable = struct { return; } - const bit = @truncate(Log2Limb, bit_count - 1); + const bit = @as(Log2Limb, @truncate(bit_count - 1)); const signmask = @as(Limb, 1) << bit; // 0b0..010...0 where 1 is the sign bit. const mask = (signmask << 1) -% 1; // 0b0..01..1 where the leftmost 1 is the sign bit. 
@@ -1912,7 +1912,7 @@ pub const Mutable = struct { .Big => buffer.len - ((total_bits + 7) / 8), }; - const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8); + const sign_bit = @as(u8, 1) << @as(u3, @intCast((total_bits - 1) % 8)); positive = ((buffer[last_byte] & sign_bit) == 0); } @@ -1942,7 +1942,7 @@ pub const Mutable = struct { .signed => b: { const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb)); const limb = mem.readVarPackedInt(SLimb, buffer, bit_index + bit_offset, bit_count - bit_index, endian, .signed); - break :b @bitCast(Limb, limb); + break :b @as(Limb, @bitCast(limb)); }, }; @@ -2170,7 +2170,7 @@ pub const Const = struct { var r: UT = 0; if (@sizeOf(UT) <= @sizeOf(Limb)) { - r = @intCast(UT, self.limbs[0]); + r = @as(UT, @intCast(self.limbs[0])); } else { for (self.limbs[0..self.limbs.len], 0..) |_, ri| { const limb = self.limbs[self.limbs.len - ri - 1]; @@ -2180,10 +2180,10 @@ pub const Const = struct { } if (info.signedness == .unsigned) { - return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned; + return if (self.positive) @as(T, @intCast(r)) else error.NegativeIntoUnsigned; } else { if (self.positive) { - return @intCast(T, r); + return @as(T, @intCast(r)); } else { if (math.cast(T, r)) |ok| { return -ok; @@ -2292,7 +2292,7 @@ pub const Const = struct { outer: for (self.limbs[0..self.limbs.len]) |limb| { var shift: usize = 0; while (shift < limb_bits) : (shift += base_shift) { - const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1)); + const r = @as(u8, @intCast((limb >> @as(Log2Limb, @intCast(shift))) & @as(Limb, base - 1))); const ch = std.fmt.digitToChar(r, case); string[digits_len] = ch; digits_len += 1; @@ -2340,7 +2340,7 @@ pub const Const = struct { var r_word = r.limbs[0]; var i: usize = 0; while (i < digits_per_limb) : (i += 1) { - const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case); + const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case); r_word /= 
base; string[digits_len] = ch; digits_len += 1; @@ -2352,7 +2352,7 @@ pub const Const = struct { var r_word = q.limbs[0]; while (r_word != 0) { - const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case); + const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case); r_word /= base; string[digits_len] = ch; digits_len += 1; @@ -3680,13 +3680,13 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { rem.* = 0; } else if (pdiv < b) { quo[i] = 0; - rem.* = @truncate(Limb, pdiv); + rem.* = @as(Limb, @truncate(pdiv)); } else if (pdiv == b) { quo[i] = 1; rem.* = 0; } else { - quo[i] = @truncate(Limb, @divTrunc(pdiv, b)); - rem.* = @truncate(Limb, pdiv - (quo[i] *% b)); + quo[i] = @as(Limb, @truncate(@divTrunc(pdiv, b))); + rem.* = @as(Limb, @truncate(pdiv - (quo[i] *% b))); } } } @@ -3719,7 +3719,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { @setRuntimeSafety(debug_safety); assert(a.len >= 1); - const interior_limb_shift = @truncate(Log2Limb, shift); + const interior_limb_shift = @as(Log2Limb, @truncate(shift)); // We only need the extra limb if the shift of the last element overflows. // This is useful for the implementation of `shiftLeftSat`. 
@@ -3741,7 +3741,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { r[dst_i] = carry | @call(.always_inline, math.shr, .{ Limb, src_digit, - limb_bits - @intCast(Limb, interior_limb_shift), + limb_bits - @as(Limb, @intCast(interior_limb_shift)), }); carry = (src_digit << interior_limb_shift); } @@ -3756,7 +3756,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { assert(r.len >= a.len - (shift / limb_bits)); const limb_shift = shift / limb_bits; - const interior_limb_shift = @truncate(Log2Limb, shift); + const interior_limb_shift = @as(Log2Limb, @truncate(shift)); var carry: Limb = 0; var i: usize = 0; @@ -3769,7 +3769,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { carry = @call(.always_inline, math.shl, .{ Limb, src_digit, - limb_bits - @intCast(Limb, interior_limb_shift), + limb_bits - @as(Limb, @intCast(interior_limb_shift)), }); } } @@ -4150,7 +4150,7 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void { // Square the result if the current bit is zero, square and multiply by a if // it is one. 
var exp_bits = 32 - 1 - b_leading_zeros; - var exp = b << @intCast(u5, 1 + b_leading_zeros); + var exp = b << @as(u5, @intCast(1 + b_leading_zeros)); var i: usize = 0; while (i < exp_bits) : (i += 1) { @@ -4174,9 +4174,9 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable { assert(storage.len >= 2); const A_is_positive = A >= 0; - const Au = @intCast(DoubleLimb, if (A < 0) -A else A); - storage[0] = @truncate(Limb, Au); - storage[1] = @truncate(Limb, Au >> limb_bits); + const Au = @as(DoubleLimb, @intCast(if (A < 0) -A else A)); + storage[0] = @as(Limb, @truncate(Au)); + storage[1] = @as(Limb, @truncate(Au >> limb_bits)); return .{ .limbs = storage[0..2], .positive = A_is_positive, diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 9c3c1b68815f..3eaa46d7c143 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -2898,19 +2898,19 @@ test "big int conversion write twos complement with padding" { buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa }; m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa }; m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 
0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq); buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }; m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned); - try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq); + try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq); bit_count = 12 * 8 + 2; @@ -3014,20 +3014,20 @@ test "big int bit reverse" { try bitReverseTest(u96, 0x123456789abcdef111213141, 0x828c84888f7b3d591e6a2c48); try bitReverseTest(u128, 0x123456789abcdef11121314151617181, 0x818e868a828c84888f7b3d591e6a2c48); - try bitReverseTest(i8, @bitCast(i8, @as(u8, 0x92)), @bitCast(i8, @as(u8, 0x49))); - try bitReverseTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x6a2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x12345f)), @bitCast(i24, @as(u24, 0xfa2c48))); - try bitReverseTest(i24, @bitCast(i24, @as(u24, 0xf23456)), @bitCast(i24, @as(u24, 0x6a2c4f))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x1e6a2c48))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), @bitCast(i32, @as(u32, 0x1e6a2c4f))); - try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x1234567f)), @bitCast(i32, @as(u32, 0xfe6a2c48))); - try bitReverseTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x591e6a2c48))); - try bitReverseTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0x3d591e6a2c48))); - try bitReverseTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0x7b3d591e6a2c48))); - try bitReverseTest(i64, @bitCast(i64, @as(u64, 
0x123456789abcdef1)), @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48))); - try bitReverseTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48))); - try bitReverseTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48))); + try bitReverseTest(i8, @as(i8, @bitCast(@as(u8, 0x92))), @as(i8, @bitCast(@as(u8, 0x49)))); + try bitReverseTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x6a2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x12345f))), @as(i24, @bitCast(@as(u24, 0xfa2c48)))); + try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), @as(i24, @bitCast(@as(u24, 0x6a2c4f)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c48)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c4f)))); + try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x1234567f))), @as(i32, @bitCast(@as(u32, 0xfe6a2c48)))); + try bitReverseTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x591e6a2c48)))); + try bitReverseTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48)))); + try bitReverseTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48)))); + try bitReverseTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48)))); + try bitReverseTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48)))); + try bitReverseTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48)))); } 
fn byteSwapTest(comptime T: type, comptime input: comptime_int, comptime expected_output: comptime_int) !void { @@ -3063,16 +3063,16 @@ test "big int byte swap" { try byteSwapTest(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412); try byteSwapTest(i8, -50, -50); - try byteSwapTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412))); - try byteSwapTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412))); - try byteSwapTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412))); - try byteSwapTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x9a78563412))); - try byteSwapTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412))); - try byteSwapTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412))); - try byteSwapTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412))); - try byteSwapTest(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412))); - try byteSwapTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412))); - try byteSwapTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412))); + try byteSwapTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412)))); + try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412)))); + try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412)))); + try byteSwapTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x9a78563412)))); + try byteSwapTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); + try byteSwapTest(i56, @as(i56, 
@bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412)))); + try byteSwapTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412)))); + try byteSwapTest(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412)))); + try byteSwapTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412)))); + try byteSwapTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412)))); try byteSwapTest(u512, 0x80, 1 << 511); try byteSwapTest(i512, 0x80, minInt(i512)); @@ -3080,11 +3080,11 @@ test "big int byte swap" { try byteSwapTest(i512, -0x100, (1 << 504) - 1); try byteSwapTest(i400, -0x100, (1 << 392) - 1); try byteSwapTest(i400, -0x2, -(1 << 392) - 1); - try byteSwapTest(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2); - try byteSwapTest(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412))); - try byteSwapTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2); - try byteSwapTest(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412))); - try byteSwapTest(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412))); + try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2); + try byteSwapTest(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412)))); + try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2); + try byteSwapTest(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412)))); + try byteSwapTest(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); } test "big.int mul multi-multi alias r with a and b" { diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 22f7ba183ff4..5313380c279e 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -137,7 +137,7 @@ pub const Rational = struct { 
debug.assert(@typeInfo(T) == .Float); const UnsignedInt = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - const f_bits = @bitCast(UnsignedInt, f); + const f_bits = @as(UnsignedInt, @bitCast(f)); const exponent_bits = math.floatExponentBits(T); const exponent_bias = (1 << (exponent_bits - 1)) - 1; @@ -146,7 +146,7 @@ pub const Rational = struct { const exponent_mask = (1 << exponent_bits) - 1; const mantissa_mask = (1 << mantissa_bits) - 1; - var exponent = @intCast(i16, (f_bits >> mantissa_bits) & exponent_mask); + var exponent = @as(i16, @intCast((f_bits >> mantissa_bits) & exponent_mask)); var mantissa = f_bits & mantissa_mask; switch (exponent) { @@ -177,9 +177,9 @@ pub const Rational = struct { try self.q.set(1); if (shift >= 0) { - try self.q.shiftLeft(&self.q, @intCast(usize, shift)); + try self.q.shiftLeft(&self.q, @as(usize, @intCast(shift))); } else { - try self.p.shiftLeft(&self.p, @intCast(usize, -shift)); + try self.p.shiftLeft(&self.p, @as(usize, @intCast(-shift))); } try self.reduce(); @@ -210,7 +210,7 @@ pub const Rational = struct { } // 1. left-shift a or sub so that a/b is in [1 << msize1, 1 << (msize2 + 1)] - var exp = @intCast(isize, self.p.bitCountTwosComp()) - @intCast(isize, self.q.bitCountTwosComp()); + var exp = @as(isize, @intCast(self.p.bitCountTwosComp())) - @as(isize, @intCast(self.q.bitCountTwosComp())); var a2 = try self.p.clone(); defer a2.deinit(); @@ -220,9 +220,9 @@ pub const Rational = struct { const shift = msize2 - exp; if (shift >= 0) { - try a2.shiftLeft(&a2, @intCast(usize, shift)); + try a2.shiftLeft(&a2, @as(usize, @intCast(shift))); } else { - try b2.shiftLeft(&b2, @intCast(usize, -shift)); + try b2.shiftLeft(&b2, @as(usize, @intCast(-shift))); } // 2. compute quotient and remainder @@ -254,8 +254,8 @@ pub const Rational = struct { // 4. 
Rounding if (emin - msize <= exp and exp <= emin) { // denormal - const shift1 = @intCast(math.Log2Int(BitReprType), emin - (exp - 1)); - const lost_bits = mantissa & ((@intCast(BitReprType, 1) << shift1) - 1); + const shift1 = @as(math.Log2Int(BitReprType), @intCast(emin - (exp - 1))); + const lost_bits = mantissa & ((@as(BitReprType, @intCast(1)) << shift1) - 1); have_rem = have_rem or lost_bits != 0; mantissa >>= shift1; exp = 2 - ebias; @@ -276,7 +276,7 @@ pub const Rational = struct { } mantissa >>= 1; - const f = math.scalbn(@floatFromInt(T, mantissa), @intCast(i32, exp - msize1)); + const f = math.scalbn(@as(T, @floatFromInt(mantissa)), @as(i32, @intCast(exp - msize1))); if (math.isInf(f)) { exact = false; } @@ -477,7 +477,7 @@ fn extractLowBits(a: Int, comptime T: type) T { const t_bits = @typeInfo(T).Int.bits; const limb_bits = @typeInfo(Limb).Int.bits; if (t_bits <= limb_bits) { - return @truncate(T, a.limbs[0]); + return @as(T, @truncate(a.limbs[0])); } else { var r: T = 0; comptime var i: usize = 0; diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig index 1ff1818e8def..737757b8176f 100644 --- a/lib/std/math/cbrt.zig +++ b/lib/std/math/cbrt.zig @@ -27,7 +27,7 @@ fn cbrt32(x: f32) f32 { const B1: u32 = 709958130; // (127 - 127.0 / 3 - 0.03306235651) * 2^23 const B2: u32 = 642849266; // (127 - 127.0 / 3 - 24 / 3 - 0.03306235651) * 2^23 - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); var hx = u & 0x7FFFFFFF; // cbrt(nan, inf) = itself @@ -41,7 +41,7 @@ fn cbrt32(x: f32) f32 { if (hx == 0) { return x; } - u = @bitCast(u32, x * 0x1.0p24); + u = @as(u32, @bitCast(x * 0x1.0p24)); hx = u & 0x7FFFFFFF; hx = hx / 3 + B2; } else { @@ -52,7 +52,7 @@ fn cbrt32(x: f32) f32 { u |= hx; // first step newton to 16 bits - var t: f64 = @bitCast(f32, u); + var t: f64 = @as(f32, @bitCast(u)); var r: f64 = t * t * t; t = t * (@as(f64, x) + x + r) / (x + r + r); @@ -60,7 +60,7 @@ fn cbrt32(x: f32) f32 { r = t * t * t; t = t * (@as(f64, x) + x + r) / (x + r 
+ r); - return @floatCast(f32, t); + return @as(f32, @floatCast(t)); } fn cbrt64(x: f64) f64 { @@ -74,8 +74,8 @@ fn cbrt64(x: f64) f64 { const P3: f64 = -0.758397934778766047437; const P4: f64 = 0.145996192886612446982; - var u = @bitCast(u64, x); - var hx = @intCast(u32, u >> 32) & 0x7FFFFFFF; + var u = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF; // cbrt(nan, inf) = itself if (hx >= 0x7FF00000) { @@ -84,8 +84,8 @@ fn cbrt64(x: f64) f64 { // cbrt to ~5bits if (hx < 0x00100000) { - u = @bitCast(u64, x * 0x1.0p54); - hx = @intCast(u32, u >> 32) & 0x7FFFFFFF; + u = @as(u64, @bitCast(x * 0x1.0p54)); + hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF; // cbrt(0) is itself if (hx == 0) { @@ -98,7 +98,7 @@ fn cbrt64(x: f64) f64 { u &= 1 << 63; u |= @as(u64, hx) << 32; - var t = @bitCast(f64, u); + var t = @as(f64, @bitCast(u)); // cbrt to 23 bits // cbrt(x) = t * cbrt(x / t^3) ~= t * P(t^3 / x) @@ -106,9 +106,9 @@ fn cbrt64(x: f64) f64 { t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4)); // Round t away from 0 to 23 bits - u = @bitCast(u64, t); + u = @as(u64, @bitCast(t)); u = (u + 0x80000000) & 0xFFFFFFFFC0000000; - t = @bitCast(f64, u); + t = @as(f64, @bitCast(u)); // one step newton to 53 bits const s = t * t; diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig index 56c199016d4c..381fc43f7d42 100644 --- a/lib/std/math/complex/atan.zig +++ b/lib/std/math/complex/atan.zig @@ -32,7 +32,7 @@ fn redupif32(x: f32) f32 { t -= 0.5; } - const u = @floatFromInt(f32, @intFromFloat(i32, t)); + const u = @as(f32, @floatFromInt(@as(i32, @intFromFloat(t)))); return ((x - u * DP1) - u * DP2) - t * DP3; } @@ -81,7 +81,7 @@ fn redupif64(x: f64) f64 { t -= 0.5; } - const u = @floatFromInt(f64, @intFromFloat(i64, t)); + const u = @as(f64, @floatFromInt(@as(i64, @intFromFloat(t)))); return ((x - u * DP1) - u * DP2) - t * DP3; } diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig index 
b3ffab517544..413279db2d99 100644 --- a/lib/std/math/complex/cosh.zig +++ b/lib/std/math/complex/cosh.zig @@ -26,10 +26,10 @@ fn cosh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; - const hy = @bitCast(u32, y); + const hy = @as(u32, @bitCast(y)); const iy = hy & 0x7fffffff; if (ix < 0x7f800000 and iy < 0x7f800000) { @@ -89,14 +89,14 @@ fn cosh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, fy >> 32); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast(fy >> 32)); + const ly = @as(u32, @truncate(fy)); const iy = hy & 0x7fffffff; // nearly non-exceptional case where x, y are finite diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig index 84ee251d0e0f..4644ea4be74e 100644 --- a/lib/std/math/complex/exp.zig +++ b/lib/std/math/complex/exp.zig @@ -30,13 +30,13 @@ fn exp32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hy = @bitCast(u32, y) & 0x7fffffff; + const hy = @as(u32, @bitCast(y)) & 0x7fffffff; // cexp(x + i0) = exp(x) + i0 if (hy == 0) { return Complex(f32).init(@exp(x), y); } - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) == 0) { return Complex(f32).init(@cos(y), @sin(y)); @@ -75,18 +75,18 @@ fn exp64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, (fy >> 32) & 0x7fffffff); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast((fy >> 32) & 
0x7fffffff)); + const ly = @as(u32, @truncate(fy)); // cexp(x + i0) = exp(x) + i0 if (hy | ly == 0) { return Complex(f64).init(@exp(x), y); } - const fx = @bitCast(u64, x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) | lx == 0) { diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig index c196d4afe6df..201b6305af37 100644 --- a/lib/std/math/complex/ldexp.zig +++ b/lib/std/math/complex/ldexp.zig @@ -27,10 +27,10 @@ fn frexp_exp32(x: f32, expt: *i32) f32 { const kln2 = 162.88958740; // k * ln2 const exp_x = @exp(x - kln2); - const hx = @bitCast(u32, exp_x); + const hx = @as(u32, @bitCast(exp_x)); // TODO zig should allow this cast implicitly because it should know the value is in range - expt.* = @intCast(i32, hx >> 23) - (0x7f + 127) + k; - return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23)); + expt.* = @as(i32, @intCast(hx >> 23)) - (0x7f + 127) + k; + return @as(f32, @bitCast((hx & 0x7fffff) | ((0x7f + 127) << 23))); } fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) { @@ -39,10 +39,10 @@ fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) { const exptf = expt + ex_expt; const half_expt1 = @divTrunc(exptf, 2); - const scale1 = @bitCast(f32, (0x7f + half_expt1) << 23); + const scale1 = @as(f32, @bitCast((0x7f + half_expt1) << 23)); const half_expt2 = exptf - half_expt1; - const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23); + const scale2 = @as(f32, @bitCast((0x7f + half_expt2) << 23)); return Complex(f32).init( @cos(z.im) * exp_x * scale1 * scale2, @@ -56,14 +56,14 @@ fn frexp_exp64(x: f64, expt: *i32) f64 { const exp_x = @exp(x - kln2); - const fx = @bitCast(u64, exp_x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(exp_x)); + const hx = @as(u32, @intCast(fx >> 
32)); + const lx = @as(u32, @truncate(fx)); - expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k; + expt.* = @as(i32, @intCast(hx >> 20)) - (0x3ff + 1023) + k; const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20); - return @bitCast(f64, (@as(u64, high_word) << 32) | lx); + return @as(f64, @bitCast((@as(u64, high_word) << 32) | lx)); } fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) { @@ -72,10 +72,10 @@ fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) { const exptf = @as(i64, expt + ex_expt); const half_expt1 = @divTrunc(exptf, 2); - const scale1 = @bitCast(f64, (0x3ff + half_expt1) << (20 + 32)); + const scale1 = @as(f64, @bitCast((0x3ff + half_expt1) << (20 + 32))); const half_expt2 = exptf - half_expt1; - const scale2 = @bitCast(f64, (0x3ff + half_expt2) << (20 + 32)); + const scale2 = @as(f64, @bitCast((0x3ff + half_expt2) << (20 + 32))); return Complex(f64).init( @cos(z.im) * exp_x * scale1 * scale2, diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig index 9afb7faf304e..c9ea0d04fc21 100644 --- a/lib/std/math/complex/sinh.zig +++ b/lib/std/math/complex/sinh.zig @@ -26,10 +26,10 @@ fn sinh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; - const hy = @bitCast(u32, y); + const hy = @as(u32, @bitCast(y)); const iy = hy & 0x7fffffff; if (ix < 0x7f800000 and iy < 0x7f800000) { @@ -89,14 +89,14 @@ fn sinh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); - const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const fx = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; - const fy = @bitCast(u64, y); - const hy = @intCast(u32, fy >> 32); - const ly = @truncate(u32, fy); + const fy = @as(u64, @bitCast(y)); + const hy = @as(u32, @intCast(fy >> 32)); + const ly = 
@as(u32, @truncate(fy)); const iy = hy & 0x7fffffff; if (ix < 0x7ff00000 and iy < 0x7ff00000) { diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig index 456d10aa85db..fe2e8e653135 100644 --- a/lib/std/math/complex/sqrt.zig +++ b/lib/std/math/complex/sqrt.zig @@ -58,14 +58,14 @@ fn sqrt32(z: Complex(f32)) Complex(f32) { if (dx >= 0) { const t = @sqrt((dx + math.hypot(f64, dx, dy)) * 0.5); return Complex(f32).init( - @floatCast(f32, t), - @floatCast(f32, dy / (2.0 * t)), + @as(f32, @floatCast(t)), + @as(f32, @floatCast(dy / (2.0 * t))), ); } else { const t = @sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5); return Complex(f32).init( - @floatCast(f32, @fabs(y) / (2.0 * t)), - @floatCast(f32, math.copysign(t, y)), + @as(f32, @floatCast(@fabs(y) / (2.0 * t))), + @as(f32, @floatCast(math.copysign(t, y))), ); } } diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig index 92e197e308dd..a90f14174187 100644 --- a/lib/std/math/complex/tanh.zig +++ b/lib/std/math/complex/tanh.zig @@ -24,7 +24,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; - const hx = @bitCast(u32, x); + const hx = @as(u32, @bitCast(x)); const ix = hx & 0x7fffffff; if (ix >= 0x7f800000) { @@ -32,7 +32,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) { const r = if (y == 0) y else x * y; return Complex(f32).init(x, r); } - const xx = @bitCast(f32, hx - 0x40000000); + const xx = @as(f32, @bitCast(hx - 0x40000000)); const r = if (math.isInf(y)) y else @sin(y) * @cos(y); return Complex(f32).init(xx, math.copysign(@as(f32, 0.0), r)); } @@ -62,11 +62,11 @@ fn tanh64(z: Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; - const fx = @bitCast(u64, x); + const fx = @as(u64, @bitCast(x)); // TODO: zig should allow this conversion implicitly because it can notice that the value necessarily // fits in range. 
- const hx = @intCast(u32, fx >> 32); - const lx = @truncate(u32, fx); + const hx = @as(u32, @intCast(fx >> 32)); + const lx = @as(u32, @truncate(fx)); const ix = hx & 0x7fffffff; if (ix >= 0x7ff00000) { @@ -75,7 +75,7 @@ fn tanh64(z: Complex(f64)) Complex(f64) { return Complex(f64).init(x, r); } - const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx); + const xx = @as(f64, @bitCast((@as(u64, hx - 0x40000000) << 32) | lx)); const r = if (math.isInf(y)) y else @sin(y) * @cos(y); return Complex(f64).init(xx, math.copysign(@as(f64, 0.0), r)); } diff --git a/lib/std/math/copysign.zig b/lib/std/math/copysign.zig index b5fd6d4d9aed..3cefc0471fe6 100644 --- a/lib/std/math/copysign.zig +++ b/lib/std/math/copysign.zig @@ -7,9 +7,9 @@ pub fn copysign(magnitude: anytype, sign: @TypeOf(magnitude)) @TypeOf(magnitude) const T = @TypeOf(magnitude); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const sign_bit_mask = @as(TBits, 1) << (@bitSizeOf(T) - 1); - const mag = @bitCast(TBits, magnitude) & ~sign_bit_mask; - const sgn = @bitCast(TBits, sign) & sign_bit_mask; - return @bitCast(T, mag | sgn); + const mag = @as(TBits, @bitCast(magnitude)) & ~sign_bit_mask; + const sgn = @as(TBits, @bitCast(sign)) & sign_bit_mask; + return @as(T, @bitCast(mag | sgn)); } test "math.copysign" { diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig index d633f2fa0c64..085d6fd2f9a0 100644 --- a/lib/std/math/cosh.zig +++ b/lib/std/math/cosh.zig @@ -29,9 +29,9 @@ pub fn cosh(x: anytype) @TypeOf(x) { // = 1 + 0.5 * (exp(x) - 1) * (exp(x) - 1) / exp(x) // = 1 + (x * x) / 2 + o(x^4) fn cosh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); // |x| < log(2) if (ux < 0x3F317217) { @@ -54,9 +54,9 @@ fn cosh32(x: f32) f32 { } fn cosh64(x: f64) f64 { - const u = @bitCast(u64, x); - const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1); - const ax = 
@bitCast(f64, u & (maxInt(u64) >> 1)); + const u = @as(u64, @bitCast(x)); + const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1); + const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // TODO: Shouldn't need this explicit check. if (x == 0.0) { diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig index 5c4052db56c5..8192573a88c4 100644 --- a/lib/std/math/expm1.zig +++ b/lib/std/math/expm1.zig @@ -38,7 +38,7 @@ fn expm1_32(x_: f32) f32 { const Q2: f32 = 1.5807170421e-3; var x = x_; - const ux = @bitCast(u32, x); + const ux = @as(u32, @bitCast(x)); const hx = ux & 0x7FFFFFFF; const sign = hx >> 31; @@ -88,8 +88,8 @@ fn expm1_32(x_: f32) f32 { kf += 0.5; } - k = @intFromFloat(i32, kf); - const t = @floatFromInt(f32, k); + k = @as(i32, @intFromFloat(kf)); + const t = @as(f32, @floatFromInt(k)); hi = x - t * ln2_hi; lo = t * ln2_lo; } @@ -133,7 +133,7 @@ fn expm1_32(x_: f32) f32 { } } - const twopk = @bitCast(f32, @intCast(u32, (0x7F +% k) << 23)); + const twopk = @as(f32, @bitCast(@as(u32, @intCast((0x7F +% k) << 23)))); if (k < 0 or k > 56) { var y = x - e + 1.0; @@ -146,7 +146,7 @@ fn expm1_32(x_: f32) f32 { return y - 1.0; } - const uf = @bitCast(f32, @intCast(u32, 0x7F -% k) << 23); + const uf = @as(f32, @bitCast(@as(u32, @intCast(0x7F -% k)) << 23)); if (k < 23) { return (x - e + (1 - uf)) * twopk; } else { @@ -169,8 +169,8 @@ fn expm1_64(x_: f64) f64 { const Q5: f64 = -2.01099218183624371326e-07; var x = x_; - const ux = @bitCast(u64, x); - const hx = @intCast(u32, ux >> 32) & 0x7FFFFFFF; + const ux = @as(u64, @bitCast(x)); + const hx = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF; const sign = ux >> 63; if (math.isNegativeInf(x)) { @@ -219,8 +219,8 @@ fn expm1_64(x_: f64) f64 { kf += 0.5; } - k = @intFromFloat(i32, kf); - const t = @floatFromInt(f64, k); + k = @as(i32, @intFromFloat(kf)); + const t = @as(f64, @floatFromInt(k)); hi = x - t * ln2_hi; lo = t * ln2_lo; } @@ -231,7 +231,7 @@ fn expm1_64(x_: f64) f64 { // |x| < 2^(-54) else if (hx < 
0x3C900000) { if (hx < 0x00100000) { - math.doNotOptimizeAway(@floatCast(f32, x)); + math.doNotOptimizeAway(@as(f32, @floatCast(x))); } return x; } else { @@ -264,7 +264,7 @@ fn expm1_64(x_: f64) f64 { } } - const twopk = @bitCast(f64, @intCast(u64, 0x3FF +% k) << 52); + const twopk = @as(f64, @bitCast(@as(u64, @intCast(0x3FF +% k)) << 52)); if (k < 0 or k > 56) { var y = x - e + 1.0; @@ -277,7 +277,7 @@ fn expm1_64(x_: f64) f64 { return y - 1.0; } - const uf = @bitCast(f64, @intCast(u64, 0x3FF -% k) << 52); + const uf = @as(f64, @bitCast(@as(u64, @intCast(0x3FF -% k)) << 52)); if (k < 20) { return (x - e + (1 - uf)) * twopk; } else { diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig index 4345233173cf..b451e468656f 100644 --- a/lib/std/math/expo2.zig +++ b/lib/std/math/expo2.zig @@ -21,7 +21,7 @@ fn expo2f(x: f32) f32 { const kln2 = 0x1.45C778p+7; const u = (0x7F + k / 2) << 23; - const scale = @bitCast(f32, u); + const scale = @as(f32, @bitCast(u)); return @exp(x - kln2) * scale * scale; } @@ -30,6 +30,6 @@ fn expo2d(x: f64) f64 { const kln2 = 0x1.62066151ADD8BP+10; const u = (0x3FF + k / 2) << 20; - const scale = @bitCast(f64, @as(u64, u) << 32); + const scale = @as(f64, @bitCast(@as(u64, u) << 32)); return @exp(x - kln2) * scale * scale; } diff --git a/lib/std/math/float.zig b/lib/std/math/float.zig index 768cc032852a..5552ec5c9c40 100644 --- a/lib/std/math/float.zig +++ b/lib/std/math/float.zig @@ -11,7 +11,7 @@ inline fn mantissaOne(comptime T: type) comptime_int { inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T { const TBits = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } }); const biased_exponent = @as(TBits, exponent + floatExponentMax(T)); - return @bitCast(T, (biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa)); + return @as(T, @bitCast((biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa))); } /// Returns the number of bits in the 
exponent of floating point type T. diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig index 31168d28d4c7..f295b959cb77 100644 --- a/lib/std/math/frexp.zig +++ b/lib/std/math/frexp.zig @@ -38,8 +38,8 @@ pub fn frexp(x: anytype) Frexp(@TypeOf(x)) { fn frexp32(x: f32) Frexp(f32) { var result: Frexp(f32) = undefined; - var y = @bitCast(u32, x); - const e = @intCast(i32, y >> 23) & 0xFF; + var y = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast(y >> 23)) & 0xFF; if (e == 0) { if (x != 0) { @@ -68,15 +68,15 @@ fn frexp32(x: f32) Frexp(f32) { result.exponent = e - 0x7E; y &= 0x807FFFFF; y |= 0x3F000000; - result.significand = @bitCast(f32, y); + result.significand = @as(f32, @bitCast(y)); return result; } fn frexp64(x: f64) Frexp(f64) { var result: Frexp(f64) = undefined; - var y = @bitCast(u64, x); - const e = @intCast(i32, y >> 52) & 0x7FF; + var y = @as(u64, @bitCast(x)); + const e = @as(i32, @intCast(y >> 52)) & 0x7FF; if (e == 0) { if (x != 0) { @@ -105,15 +105,15 @@ fn frexp64(x: f64) Frexp(f64) { result.exponent = e - 0x3FE; y &= 0x800FFFFFFFFFFFFF; y |= 0x3FE0000000000000; - result.significand = @bitCast(f64, y); + result.significand = @as(f64, @bitCast(y)); return result; } fn frexp128(x: f128) Frexp(f128) { var result: Frexp(f128) = undefined; - var y = @bitCast(u128, x); - const e = @intCast(i32, y >> 112) & 0x7FFF; + var y = @as(u128, @bitCast(x)); + const e = @as(i32, @intCast(y >> 112)) & 0x7FFF; if (e == 0) { if (x != 0) { @@ -142,7 +142,7 @@ fn frexp128(x: f128) Frexp(f128) { result.exponent = e - 0x3FFE; y &= 0x8000FFFFFFFFFFFFFFFFFFFFFFFFFFFF; y |= 0x3FFE0000000000000000000000000000; - result.significand = @bitCast(f128, y); + result.significand = @as(f128, @bitCast(y)); return result; } diff --git a/lib/std/math/hypot.zig b/lib/std/math/hypot.zig index 981f6143feb5..9fb569667b5b 100644 --- a/lib/std/math/hypot.zig +++ b/lib/std/math/hypot.zig @@ -25,8 +25,8 @@ pub fn hypot(comptime T: type, x: T, y: T) T { } fn hypot32(x: f32, y: f32) f32 
{ - var ux = @bitCast(u32, x); - var uy = @bitCast(u32, y); + var ux = @as(u32, @bitCast(x)); + var uy = @as(u32, @bitCast(y)); ux &= maxInt(u32) >> 1; uy &= maxInt(u32) >> 1; @@ -36,8 +36,8 @@ fn hypot32(x: f32, y: f32) f32 { uy = tmp; } - var xx = @bitCast(f32, ux); - var yy = @bitCast(f32, uy); + var xx = @as(f32, @bitCast(ux)); + var yy = @as(f32, @bitCast(uy)); if (uy == 0xFF << 23) { return yy; } @@ -56,7 +56,7 @@ fn hypot32(x: f32, y: f32) f32 { yy *= 0x1.0p-90; } - return z * @sqrt(@floatCast(f32, @as(f64, x) * x + @as(f64, y) * y)); + return z * @sqrt(@as(f32, @floatCast(@as(f64, x) * x + @as(f64, y) * y))); } fn sq(hi: *f64, lo: *f64, x: f64) void { @@ -69,8 +69,8 @@ fn sq(hi: *f64, lo: *f64, x: f64) void { } fn hypot64(x: f64, y: f64) f64 { - var ux = @bitCast(u64, x); - var uy = @bitCast(u64, y); + var ux = @as(u64, @bitCast(x)); + var uy = @as(u64, @bitCast(y)); ux &= maxInt(u64) >> 1; uy &= maxInt(u64) >> 1; @@ -82,8 +82,8 @@ fn hypot64(x: f64, y: f64) f64 { const ex = ux >> 52; const ey = uy >> 52; - var xx = @bitCast(f64, ux); - var yy = @bitCast(f64, uy); + var xx = @as(f64, @bitCast(ux)); + var yy = @as(f64, @bitCast(uy)); // hypot(inf, nan) == inf if (ey == 0x7FF) { diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig index 7c58be2ec519..735a2250c9fd 100644 --- a/lib/std/math/ilogb.zig +++ b/lib/std/math/ilogb.zig @@ -38,8 +38,8 @@ fn ilogbX(comptime T: type, x: T) i32 { const absMask = signBit - 1; - var u = @bitCast(Z, x) & absMask; - var e = @intCast(i32, u >> significandBits); + var u = @as(Z, @bitCast(x)) & absMask; + var e = @as(i32, @intCast(u >> significandBits)); if (e == 0) { if (u == 0) { @@ -49,12 +49,12 @@ fn ilogbX(comptime T: type, x: T) i32 { // offset sign bit, exponent bits, and integer bit (if present) + bias const offset = 1 + exponentBits + @as(comptime_int, @intFromBool(T == f80)) - exponentBias; - return offset - @intCast(i32, @clz(u)); + return offset - @as(i32, @intCast(@clz(u))); } if (e == maxExponent) { 
math.raiseInvalid(); - if (u > @bitCast(Z, math.inf(T))) { + if (u > @as(Z, @bitCast(math.inf(T)))) { return fp_ilogbnan; // u is a NaN } else return maxInt(i32); } diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig index 556f8a2378db..36c6cdd062b6 100644 --- a/lib/std/math/isfinite.zig +++ b/lib/std/math/isfinite.zig @@ -7,7 +7,7 @@ pub fn isFinite(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(TBits, x) & remove_sign < @bitCast(TBits, math.inf(T)); + return @as(TBits, @bitCast(x)) & remove_sign < @as(TBits, @bitCast(math.inf(T))); } test "math.isFinite" { diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig index ac30470f31c3..9b3a0a8f4a8a 100644 --- a/lib/std/math/isinf.zig +++ b/lib/std/math/isinf.zig @@ -7,7 +7,7 @@ pub inline fn isInf(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); const remove_sign = ~@as(TBits, 0) >> 1; - return @bitCast(TBits, x) & remove_sign == @bitCast(TBits, math.inf(T)); + return @as(TBits, @bitCast(x)) & remove_sign == @as(TBits, @bitCast(math.inf(T))); } /// Returns whether x is an infinity with a positive sign. diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig index 08f848f5dfc7..38b459b54e66 100644 --- a/lib/std/math/isnormal.zig +++ b/lib/std/math/isnormal.zig @@ -15,7 +15,7 @@ pub fn isNormal(x: anytype) bool { // The sign bit is removed because all ones would overflow into it. // For f80, even though it has an explicit integer part stored, // the exponent effectively takes priority if mismatching. 
- const value = @bitCast(TBits, x) +% increment_exp; + const value = @as(TBits, @bitCast(x)) +% increment_exp; return value & remove_sign >= (increment_exp << 1); } @@ -35,7 +35,7 @@ test "math.isNormal" { try expect(!isNormal(@as(T, math.floatTrueMin(T)))); // largest subnormal - try expect(!isNormal(@bitCast(T, ~(~@as(TBits, 0) << math.floatFractionalBits(T))))); + try expect(!isNormal(@as(T, @bitCast(~(~@as(TBits, 0) << math.floatFractionalBits(T)))))); // non-finite numbers try expect(!isNormal(-math.inf(T))); @@ -43,6 +43,6 @@ test "math.isNormal" { try expect(!isNormal(math.nan(T))); // overflow edge-case (described in implementation, also see #10133) - try expect(!isNormal(@bitCast(T, ~@as(TBits, 0)))); + try expect(!isNormal(@as(T, @bitCast(~@as(TBits, 0))))); } } diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index 448e94f8e5bd..d32a8189b6e7 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -16,53 +16,53 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { const max_biased_exponent = 2 * math.floatExponentMax(T); const mantissa_mask = @as(TBits, (1 << mantissa_bits) - 1); - const repr = @bitCast(TBits, x); + const repr = @as(TBits, @bitCast(x)); const sign_bit = repr & (1 << (exponent_bits + mantissa_bits)); if (math.isNan(x) or !math.isFinite(x)) return x; - var exponent: i32 = @intCast(i32, (repr << 1) >> (mantissa_bits + 1)); + var exponent: i32 = @as(i32, @intCast((repr << 1) >> (mantissa_bits + 1))); if (exponent == 0) exponent += (@as(i32, exponent_bits) + @intFromBool(T == f80)) - @clz(repr << 1); if (n >= 0) { if (n > max_biased_exponent - exponent) { // Overflow. 
Return +/- inf - return @bitCast(T, @bitCast(TBits, math.inf(T)) | sign_bit); + return @as(T, @bitCast(@as(TBits, @bitCast(math.inf(T))) | sign_bit)); } else if (exponent + n <= 0) { // Result is subnormal - return @bitCast(T, (repr << @intCast(Log2Int(TBits), n)) | sign_bit); + return @as(T, @bitCast((repr << @as(Log2Int(TBits), @intCast(n))) | sign_bit)); } else if (exponent <= 0) { // Result is normal, but needs shifting - var result = @intCast(TBits, n + exponent) << mantissa_bits; - result |= (repr << @intCast(Log2Int(TBits), 1 - exponent)) & mantissa_mask; - return @bitCast(T, result | sign_bit); + var result = @as(TBits, @intCast(n + exponent)) << mantissa_bits; + result |= (repr << @as(Log2Int(TBits), @intCast(1 - exponent))) & mantissa_mask; + return @as(T, @bitCast(result | sign_bit)); } // Result needs no shifting - return @bitCast(T, repr + (@intCast(TBits, n) << mantissa_bits)); + return @as(T, @bitCast(repr + (@as(TBits, @intCast(n)) << mantissa_bits))); } else { if (n <= -exponent) { if (n < -(mantissa_bits + exponent)) - return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0 + return @as(T, @bitCast(sign_bit)); // Severe underflow. 
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); + const shift = @as(Log2Int(TBits), @intCast(@min(-n, -(exponent + n) + 1))); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; if (T != f80) // Include integer bit result |= @as(TBits, @intFromBool(exponent > 0)) << fractional_bits; - result = @intCast(TBits, (result >> (shift - 1))); + result = @as(TBits, @intCast((result >> (shift - 1)))); // Round result, including round-to-even for exact ties result = ((result + 1) >> 1) & ~@as(TBits, @intFromBool(exact_tie)); - return @bitCast(T, result | sign_bit); + return @as(T, @bitCast(result | sign_bit)); } // Result is exact, and needs no shifting - return @bitCast(T, repr - (@intCast(TBits, -n) << mantissa_bits)); + return @as(T, @bitCast(repr - (@as(TBits, @intCast(-n)) << mantissa_bits))); } } @@ -105,8 +105,8 @@ test "math.ldexp" { // Multiplications might flush the denormals to zero, esp. at // runtime, so we manually construct the constants here instead. 
const Z = std.meta.Int(.unsigned, @bitSizeOf(T)); - const EightTimesTrueMin = @bitCast(T, @as(Z, 8)); - const TwoTimesTrueMin = @bitCast(T, @as(Z, 2)); + const EightTimesTrueMin = @as(T, @bitCast(@as(Z, 8))); + const TwoTimesTrueMin = @as(T, @bitCast(@as(Z, 2))); // subnormals -> subnormals try expect(ldexp(math.floatTrueMin(T), 3) == EightTimesTrueMin); diff --git a/lib/std/math/log.zig b/lib/std/math/log.zig index c1a0f5c8e473..9f27130ce1dc 100644 --- a/lib/std/math/log.zig +++ b/lib/std/math/log.zig @@ -30,12 +30,12 @@ pub fn log(comptime T: type, base: T, x: T) T { // TODO implement integer log without using float math .Int => |IntType| switch (IntType.signedness) { .signed => @compileError("log not implemented for signed integers"), - .unsigned => return @intFromFloat(T, @floor(@log(@floatFromInt(f64, x)) / @log(float_base))), + .unsigned => return @as(T, @intFromFloat(@floor(@log(@as(f64, @floatFromInt(x))) / @log(float_base)))), }, .Float => { switch (T) { - f32 => return @floatCast(f32, @log(@as(f64, x)) / @log(float_base)), + f32 => return @as(f32, @floatCast(@log(@as(f64, x)) / @log(float_base))), f64 => return @log(x) / @log(float_base), else => @compileError("log not implemented for " ++ @typeName(T)), } diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig index 44e5a884459b..785f11771ca5 100644 --- a/lib/std/math/log10.zig +++ b/lib/std/math/log10.zig @@ -49,9 +49,9 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) { const bit_size = @typeInfo(T).Int.bits; if (bit_size <= 8) { - return @intCast(OutT, log10_int_u8(x)); + return @as(OutT, @intCast(log10_int_u8(x))); } else if (bit_size <= 16) { - return @intCast(OutT, less_than_5(x)); + return @as(OutT, @intCast(less_than_5(x))); } var val = x; @@ -71,7 +71,7 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) { log += 5; } - return @intCast(OutT, log + less_than_5(@intCast(u32, val))); + return @as(OutT, @intCast(log + less_than_5(@as(u32, @intCast(val))))); } fn pow10(comptime y: 
comptime_int) comptime_int { @@ -134,7 +134,7 @@ inline fn less_than_5(x: u32) u32 { } fn oldlog10(x: anytype) u8 { - return @intFromFloat(u8, @log10(@floatFromInt(f64, x))); + return @as(u8, @intFromFloat(@log10(@as(f64, @floatFromInt(x))))); } test "oldlog10 doesn't work" { @@ -158,7 +158,7 @@ test "log10_int vs old implementation" { inline for (int_types) |T| { const last = @min(maxInt(T), 100_000); for (1..last) |i| { - const x = @intCast(T, i); + const x = @as(T, @intCast(i)); try testing.expectEqual(oldlog10(x), log10_int(x)); } @@ -185,10 +185,10 @@ test "log10_int close to powers of 10" { try testing.expectEqual(expected_max_ilog, log10_int(max_val)); for (0..(expected_max_ilog + 1)) |idx| { - const i = @intCast(T, idx); + const i = @as(T, @intCast(idx)); const p: T = try math.powi(T, 10, i); - const b = @intCast(Log2Int(T), i); + const b = @as(Log2Int(T), @intCast(i)); if (p >= 10) { try testing.expectEqual(b - 1, log10_int(p - 9)); diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig index ad67955a8d20..1f986a20c830 100644 --- a/lib/std/math/log1p.zig +++ b/lib/std/math/log1p.zig @@ -33,7 +33,7 @@ fn log1p_32(x: f32) f32 { const Lg3: f32 = 0x91e9ee.0p-25; const Lg4: f32 = 0xf89e26.0p-26; - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); var ix = u; var k: i32 = 1; var f: f32 = undefined; @@ -72,9 +72,9 @@ fn log1p_32(x: f32) f32 { if (k != 0) { const uf = 1 + x; - var iu = @bitCast(u32, uf); + var iu = @as(u32, @bitCast(uf)); iu += 0x3F800000 - 0x3F3504F3; - k = @intCast(i32, iu >> 23) - 0x7F; + k = @as(i32, @intCast(iu >> 23)) - 0x7F; // correction to avoid underflow in c / u if (k < 25) { @@ -86,7 +86,7 @@ fn log1p_32(x: f32) f32 { // u into [sqrt(2)/2, sqrt(2)] iu = (iu & 0x007FFFFF) + 0x3F3504F3; - f = @bitCast(f32, iu) - 1; + f = @as(f32, @bitCast(iu)) - 1; } const s = f / (2.0 + f); @@ -96,7 +96,7 @@ fn log1p_32(x: f32) f32 { const t2 = z * (Lg1 + w * Lg3); const R = t2 + t1; const hfsq = 0.5 * f * f; - const dk = 
@floatFromInt(f32, k); + const dk = @as(f32, @floatFromInt(k)); return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi; } @@ -112,8 +112,8 @@ fn log1p_64(x: f64) f64 { const Lg6: f64 = 1.531383769920937332e-01; const Lg7: f64 = 1.479819860511658591e-01; - var ix = @bitCast(u64, x); - var hx = @intCast(u32, ix >> 32); + var ix = @as(u64, @bitCast(x)); + var hx = @as(u32, @intCast(ix >> 32)); var k: i32 = 1; var c: f64 = undefined; var f: f64 = undefined; @@ -150,10 +150,10 @@ fn log1p_64(x: f64) f64 { if (k != 0) { const uf = 1 + x; - const hu = @bitCast(u64, uf); - var iu = @intCast(u32, hu >> 32); + const hu = @as(u64, @bitCast(uf)); + var iu = @as(u32, @intCast(hu >> 32)); iu += 0x3FF00000 - 0x3FE6A09E; - k = @intCast(i32, iu >> 20) - 0x3FF; + k = @as(i32, @intCast(iu >> 20)) - 0x3FF; // correction to avoid underflow in c / u if (k < 54) { @@ -166,7 +166,7 @@ fn log1p_64(x: f64) f64 { // u into [sqrt(2)/2, sqrt(2)] iu = (iu & 0x000FFFFF) + 0x3FE6A09E; const iq = (@as(u64, iu) << 32) | (hu & 0xFFFFFFFF); - f = @bitCast(f64, iq) - 1; + f = @as(f64, @bitCast(iq)) - 1; } const hfsq = 0.5 * f * f; @@ -176,7 +176,7 @@ fn log1p_64(x: f64) f64 { const t1 = w * (Lg2 + w * (Lg4 + w * Lg6)); const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7))); const R = t2 + t1; - const dk = @floatFromInt(f64, k); + const dk = @as(f64, @floatFromInt(k)); return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi; } diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig index d12c49772981..b9d0083e3c44 100644 --- a/lib/std/math/modf.zig +++ b/lib/std/math/modf.zig @@ -37,8 +37,8 @@ pub fn modf(x: anytype) modf_result(@TypeOf(x)) { fn modf32(x: f32) modf32_result { var result: modf32_result = undefined; - const u = @bitCast(u32, x); - const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F; + const u = @as(u32, @bitCast(x)); + const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F; const us = u & 0x80000000; // TODO: Shouldn't need this. 
@@ -54,26 +54,26 @@ fn modf32(x: f32) modf32_result { if (e == 0x80 and u << 9 != 0) { // nan result.fpart = x; } else { - result.fpart = @bitCast(f32, us); + result.fpart = @as(f32, @bitCast(us)); } return result; } // no integral part if (e < 0) { - result.ipart = @bitCast(f32, us); + result.ipart = @as(f32, @bitCast(us)); result.fpart = x; return result; } - const mask = @as(u32, 0x007FFFFF) >> @intCast(u5, e); + const mask = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e)); if (u & mask == 0) { result.ipart = x; - result.fpart = @bitCast(f32, us); + result.fpart = @as(f32, @bitCast(us)); return result; } - const uf = @bitCast(f32, u & ~mask); + const uf = @as(f32, @bitCast(u & ~mask)); result.ipart = uf; result.fpart = x - uf; return result; @@ -82,8 +82,8 @@ fn modf32(x: f32) modf32_result { fn modf64(x: f64) modf64_result { var result: modf64_result = undefined; - const u = @bitCast(u64, x); - const e = @intCast(i32, (u >> 52) & 0x7FF) - 0x3FF; + const u = @as(u64, @bitCast(x)); + const e = @as(i32, @intCast((u >> 52) & 0x7FF)) - 0x3FF; const us = u & (1 << 63); if (math.isInf(x)) { @@ -98,26 +98,26 @@ fn modf64(x: f64) modf64_result { if (e == 0x400 and u << 12 != 0) { // nan result.fpart = x; } else { - result.fpart = @bitCast(f64, us); + result.fpart = @as(f64, @bitCast(us)); } return result; } // no integral part if (e < 0) { - result.ipart = @bitCast(f64, us); + result.ipart = @as(f64, @bitCast(us)); result.fpart = x; return result; } - const mask = @as(u64, maxInt(u64) >> 12) >> @intCast(u6, e); + const mask = @as(u64, maxInt(u64) >> 12) >> @as(u6, @intCast(e)); if (u & mask == 0) { result.ipart = x; - result.fpart = @bitCast(f64, us); + result.fpart = @as(f64, @bitCast(us)); return result; } - const uf = @bitCast(f64, u & ~mask); + const uf = @as(f64, @bitCast(u & ~mask)); result.ipart = uf; result.fpart = x - uf; return result; diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig index 7643e143e3c4..36aef966cfe8 100644 --- a/lib/std/math/pow.zig +++ 
b/lib/std/math/pow.zig @@ -144,7 +144,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { var xe = r2.exponent; var x1 = r2.significand; - var i = @intFromFloat(std.meta.Int(.signed, @typeInfo(T).Float.bits), yi); + var i = @as(std.meta.Int(.signed, @typeInfo(T).Float.bits), @intFromFloat(yi)); while (i != 0) : (i >>= 1) { const overflow_shift = math.floatExponentBits(T) + 1; if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) { @@ -179,7 +179,7 @@ pub fn pow(comptime T: type, x: T, y: T) T { fn isOddInteger(x: f64) bool { const r = math.modf(x); - return r.fpart == 0.0 and @intFromFloat(i64, r.ipart) & 1 == 1; + return r.fpart == 0.0 and @as(i64, @intFromFloat(r.ipart)) & 1 == 1; } test "math.pow" { diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig index 9aab487d37e4..df061568b100 100644 --- a/lib/std/math/signbit.zig +++ b/lib/std/math/signbit.zig @@ -6,7 +6,7 @@ const expect = std.testing.expect; pub fn signbit(x: anytype) bool { const T = @TypeOf(x); const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - return @bitCast(TBits, x) >> (@bitSizeOf(T) - 1) != 0; + return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0; } test "math.signbit" { diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig index 5ec47fa3b587..0082f61d3f68 100644 --- a/lib/std/math/sinh.zig +++ b/lib/std/math/sinh.zig @@ -29,9 +29,9 @@ pub fn sinh(x: anytype) @TypeOf(x) { // = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 // = x + x^3 / 6 + o(x^5) fn sinh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); if (x == 0.0 or math.isNan(x)) { return x; @@ -60,9 +60,9 @@ fn sinh32(x: f32) f32 { } fn sinh64(x: f64) f64 { - const u = @bitCast(u64, x); - const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1); - const ax = @bitCast(f64, u & (maxInt(u64) >> 1)); + const u = @as(u64, @bitCast(x)); + const w = @as(u32, @intCast(u >> 32)) & 
(maxInt(u32) >> 1); + const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); if (x == 0.0 or math.isNan(x)) { return x; diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 926582034e41..0dd5381cd946 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -57,7 +57,7 @@ fn sqrt_int(comptime T: type, value: T) Sqrt(T) { one >>= 2; } - return @intCast(Sqrt(T), res); + return @as(Sqrt(T), @intCast(res)); } } diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig index dcde79a925a5..9c9a3e68018a 100644 --- a/lib/std/math/tanh.zig +++ b/lib/std/math/tanh.zig @@ -29,9 +29,9 @@ pub fn tanh(x: anytype) @TypeOf(x) { // = (exp(2x) - 1) / (exp(2x) - 1 + 2) // = (1 - exp(-2x)) / (exp(-2x) - 1 + 2) fn tanh32(x: f32) f32 { - const u = @bitCast(u32, x); + const u = @as(u32, @bitCast(x)); const ux = u & 0x7FFFFFFF; - const ax = @bitCast(f32, ux); + const ax = @as(f32, @bitCast(ux)); const sign = (u >> 31) != 0; var t: f32 = undefined; @@ -66,10 +66,10 @@ fn tanh32(x: f32) f32 { } fn tanh64(x: f64) f64 { - const u = @bitCast(u64, x); + const u = @as(u64, @bitCast(x)); const ux = u & 0x7FFFFFFFFFFFFFFF; - const w = @intCast(u32, ux >> 32); - const ax = @bitCast(f64, ux); + const w = @as(u32, @intCast(ux >> 32)); + const ax = @as(f64, @bitCast(ux)); const sign = (u >> 63) != 0; var t: f64 = undefined; @@ -96,7 +96,7 @@ fn tanh64(x: f64) f64 { } // |x| is subnormal else { - math.doNotOptimizeAway(@floatCast(f32, ax)); + math.doNotOptimizeAway(@as(f32, @floatCast(ax))); t = ax; } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index bbeecdda2376..229bc0b63e49 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -69,7 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type { ret_addr: usize, ) ?[*]u8 { assert(n > 0); - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); const underlying = self.getUnderlyingAllocatorPtr(); const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse return 
null; @@ -84,7 +84,7 @@ pub fn ValidationAllocator(comptime T: type) type { new_len: usize, ret_addr: usize, ) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); assert(buf.len > 0); const underlying = self.getUnderlyingAllocatorPtr(); return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr); @@ -96,7 +96,7 @@ pub fn ValidationAllocator(comptime T: type) type { log2_buf_align: u8, ret_addr: usize, ) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx)); + const self: *Self = @ptrCast(@alignCast(ctx)); assert(buf.len > 0); const underlying = self.getUnderlyingAllocatorPtr(); underlying.rawFree(buf, log2_buf_align, ret_addr); @@ -169,7 +169,7 @@ test "Allocator.resize" { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); - for (values, 0..) |*v, i| v.* = @intCast(T, i); + for (values, 0..) |*v, i| v.* = @as(T, @intCast(i)); if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory; values = values.ptr[0 .. values.len + 10]; try testing.expect(values.len == 110); @@ -185,7 +185,7 @@ test "Allocator.resize" { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); - for (values, 0..) |*v, i| v.* = @floatFromInt(T, i); + for (values, 0..) |*v, i| v.* = @as(T, @floatFromInt(i)); if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory; values = values.ptr[0 .. 
values.len + 10]; try testing.expect(values.len == 110); @@ -233,7 +233,7 @@ pub fn zeroes(comptime T: type) T { return @as(T, 0); }, .Enum, .EnumLiteral => { - return @enumFromInt(T, 0); + return @as(T, @enumFromInt(0)); }, .Void => { return {}; @@ -264,7 +264,7 @@ pub fn zeroes(comptime T: type) T { switch (ptr_info.size) { .Slice => { if (ptr_info.sentinel) |sentinel| { - if (ptr_info.child == u8 and @ptrCast(*const u8, sentinel).* == 0) { + if (ptr_info.child == u8 and @as(*const u8, @ptrCast(sentinel)).* == 0) { return ""; // A special case for the most common use-case: null-terminated strings. } @compileError("Can't set a sentinel slice to zero. This would require allocating memory."); @@ -282,7 +282,7 @@ pub fn zeroes(comptime T: type) T { }, .Array => |info| { if (info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; return [_:sentinel]info.child{zeroes(info.child)} ** info.len; } return [_]info.child{zeroes(info.child)} ** info.len; @@ -456,7 +456,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T { }, } } else if (field.default_value) |default_value_ptr| { - const default_value = @ptrCast(*align(1) const field.type, default_value_ptr).*; + const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*; @field(value, field.name) = default_value; } else { switch (@typeInfo(field.type)) { @@ -709,7 +709,7 @@ pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { const l = len(ptr); const ptr_info = @typeInfo(Result).Pointer; if (ptr_info.sentinel) |s_ptr| { - const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*; + const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*; return ptr[0..l :s]; } else { return ptr[0..l]; @@ -740,7 +740,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { // to find the value searched for, which is only the case if it matches // the sentinel of the type 
passed. if (array_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; if (end == sentinel) { new_ptr_info.sentinel = &end; } else { @@ -755,7 +755,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { // to find the value searched for, which is only the case if it matches // the sentinel of the type passed. if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; if (end == sentinel) { new_ptr_info.sentinel = &end; } else { @@ -793,7 +793,7 @@ pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@Typ const length = lenSliceTo(ptr, end); const ptr_info = @typeInfo(Result).Pointer; if (ptr_info.sentinel) |s_ptr| { - const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*; + const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*; return ptr[0..length :s]; } else { return ptr[0..length]; @@ -810,11 +810,11 @@ test "sliceTo" { try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3)); - const sentinel_ptr = @ptrCast([*:5]u16, &array); + const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3)); try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99)); - const optional_sentinel_ptr = @ptrCast(?[*:5]u16, &array); + const optional_sentinel_ptr = @as(?[*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?); try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?); @@ -846,7 +846,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { .One => switch 
(@typeInfo(ptr_info.child)) { .Array => |array_info| { if (array_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; if (sentinel == end) { return indexOfSentinel(array_info.child, end, ptr); } @@ -856,7 +856,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { else => {}, }, .Many => if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; // We may be looking for something other than the sentinel, // but iterating past the sentinel would be a bug so we need // to check for both. @@ -870,7 +870,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { }, .Slice => { if (ptr_info.sentinel) |sentinel_ptr| { - const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*; if (sentinel == end) { return indexOfSentinel(ptr_info.child, sentinel, ptr); } @@ -893,7 +893,7 @@ test "lenSliceTo" { try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3)); try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3)); - const sentinel_ptr = @ptrCast([*:5]u16, &array); + const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3)); try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99)); @@ -925,7 +925,7 @@ pub fn len(value: anytype) usize { .Many => { const sentinel_ptr = info.sentinel orelse @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))); - const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*; + const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; return indexOfSentinel(info.child, sentinel, value); 
}, .C => { @@ -1331,7 +1331,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian) .Little => { const ShiftType = math.Log2Int(ReturnType); for (bytes, 0..) |b, index| { - result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8)); + result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8))); } }, } @@ -1359,8 +1359,8 @@ pub fn readVarPackedInt( const Log2N = std.math.Log2Int(T); const read_size = (bit_count + (bit_offset % 8) + 7) / 8; - const bit_shift = @intCast(u3, bit_offset % 8); - const pad = @intCast(Log2N, @bitSizeOf(T) - bit_count); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); + const pad = @as(Log2N, @intCast(@bitSizeOf(T) - bit_count)); const lowest_byte = switch (endian) { .Big => bytes.len - (bit_offset / 8) - read_size, @@ -1372,17 +1372,17 @@ pub fn readVarPackedInt( // These are the same shifts/masks we perform below, but adds `@truncate`/`@intCast` // where needed since int is smaller than a byte. 
const value = if (read_size == 1) b: { - break :b @truncate(uN, read_bytes[0] >> bit_shift); + break :b @as(uN, @truncate(read_bytes[0] >> bit_shift)); } else b: { const i: u1 = @intFromBool(endian == .Big); - const head = @truncate(uN, read_bytes[i] >> bit_shift); - const tail_shift = @intCast(Log2N, @as(u4, 8) - bit_shift); - const tail = @truncate(uN, read_bytes[1 - i]); + const head = @as(uN, @truncate(read_bytes[i] >> bit_shift)); + const tail_shift = @as(Log2N, @intCast(@as(u4, 8) - bit_shift)); + const tail = @as(uN, @truncate(read_bytes[1 - i])); break :b (tail << tail_shift) | head; }; switch (signedness) { - .signed => return @intCast(T, (@bitCast(iN, value) << pad) >> pad), - .unsigned => return @intCast(T, (@bitCast(uN, value) << pad) >> pad), + .signed => return @as(T, @intCast((@as(iN, @bitCast(value)) << pad) >> pad)), + .unsigned => return @as(T, @intCast((@as(uN, @bitCast(value)) << pad) >> pad)), } } @@ -1398,13 +1398,13 @@ pub fn readVarPackedInt( .Little => { int = read_bytes[0] >> bit_shift; for (read_bytes[1..], 0..) |elem, i| { - int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift))); + int |= (@as(uN, elem) << @as(Log2N, @intCast((8 * (i + 1) - bit_shift)))); } }, } switch (signedness) { - .signed => return @intCast(T, (@bitCast(iN, int) << pad) >> pad), - .unsigned => return @intCast(T, (@bitCast(uN, int) << pad) >> pad), + .signed => return @as(T, @intCast((@as(iN, @bitCast(int)) << pad) >> pad)), + .unsigned => return @as(T, @intCast((@as(uN, @bitCast(int)) << pad) >> pad)), } } @@ -1414,7 +1414,7 @@ pub fn readVarPackedInt( /// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { - return @ptrCast(*align(1) const T, bytes).*; + return @as(*align(1) const T, @ptrCast(bytes)).*; } /// Reads an integer from memory with bit count specified by T. 
@@ -1480,10 +1480,10 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const load_size = (bit_count + 7) / 8; - const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count); + const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count)); const LoadInt = std.meta.Int(.unsigned, load_size * 8); if (bit_count == 0) @@ -1492,13 +1492,13 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T // Read by loading a LoadInt, and then follow it up with a 1-byte read // of the tail if bit_offset pushed us over a byte boundary. const read_bytes = bytes[bit_offset / 8 ..]; - const val = @truncate(uN, readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift); + const val = @as(uN, @truncate(readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift)); if (bit_shift > load_tail_bits) { - const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits); + const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits)); const tail_byte = read_bytes[load_size]; - const tail_truncated = if (bit_count < 8) @truncate(uN, tail_byte) else @as(uN, tail_byte); - return @bitCast(T, val | (tail_truncated << (@truncate(Log2N, bit_count) -% tail_bits))); - } else return @bitCast(T, val); + const tail_truncated = if (bit_count < 8) @as(uN, @truncate(tail_byte)) else @as(uN, tail_byte); + return @as(T, @bitCast(val | (tail_truncated << (@as(Log2N, @truncate(bit_count)) -% tail_bits)))); + } else return @as(T, @bitCast(val)); } fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { @@ -1506,11 +1506,11 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 
8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const byte_count = (@as(usize, bit_shift) + bit_count + 7) / 8; const load_size = (bit_count + 7) / 8; - const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count); + const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count)); const LoadInt = std.meta.Int(.unsigned, load_size * 8); if (bit_count == 0) @@ -1520,12 +1520,12 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T { // of the tail if bit_offset pushed us over a byte boundary. const end = bytes.len - (bit_offset / 8); const read_bytes = bytes[(end - byte_count)..end]; - const val = @truncate(uN, readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift); + const val = @as(uN, @truncate(readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift)); if (bit_shift > load_tail_bits) { - const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits); - const tail_byte = if (bit_count < 8) @truncate(uN, read_bytes[0]) else @as(uN, read_bytes[0]); - return @bitCast(T, val | (tail_byte << (@truncate(Log2N, bit_count) -% tail_bits))); - } else return @bitCast(T, val); + const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits)); + const tail_byte = if (bit_count < 8) @as(uN, @truncate(read_bytes[0])) else @as(uN, read_bytes[0]); + return @as(T, @bitCast(val | (tail_byte << (@as(Log2N, @truncate(bit_count)) -% tail_bits)))); + } else return @as(T, @bitCast(val)); } pub const readPackedIntNative = switch (native_endian) { @@ -1605,7 +1605,7 @@ test "readIntBig and readIntLittle" { /// This function stores in native endian, which means it is implemented as a simple /// memory store. pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void { - @ptrCast(*align(1) T, buf).* = value; + @as(*align(1) T, @ptrCast(buf)).* = value; } /// Writes an integer to memory, storing it in twos-complement. 
@@ -1642,10 +1642,10 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value: const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const store_size = (@bitSizeOf(T) + 7) / 8; - const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count); + const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count)); const StoreInt = std.meta.Int(.unsigned, store_size * 8); if (bit_count == 0) @@ -1656,11 +1656,11 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value: const write_bytes = bytes[bit_offset / 8 ..]; const head = write_bytes[0] & ((@as(u8, 1) << bit_shift) - 1); - var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head); + var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head)); if (bit_shift > store_tail_bits) { - const tail_len = @intCast(Log2N, bit_shift - store_tail_bits); - write_bytes[store_size] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1); - write_bytes[store_size] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len))); + const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits)); + write_bytes[store_size] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1); + write_bytes[store_size] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len)))); } else if (bit_shift < store_tail_bits) { const tail_len = store_tail_bits - bit_shift; const tail = write_bytes[store_size - 1] & (@as(u8, 0xfe) << (7 - tail_len)); @@ -1675,11 +1675,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) const Log2N = std.math.Log2Int(T); const bit_count = @as(usize, @bitSizeOf(T)); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); 
const byte_count = (bit_shift + bit_count + 7) / 8; const store_size = (@bitSizeOf(T) + 7) / 8; - const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count); + const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count)); const StoreInt = std.meta.Int(.unsigned, store_size * 8); if (bit_count == 0) @@ -1691,11 +1691,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) const write_bytes = bytes[(end - byte_count)..end]; const head = write_bytes[byte_count - 1] & ((@as(u8, 1) << bit_shift) - 1); - var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head); + var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head)); if (bit_shift > store_tail_bits) { - const tail_len = @intCast(Log2N, bit_shift - store_tail_bits); - write_bytes[0] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1); - write_bytes[0] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len))); + const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits)); + write_bytes[0] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1); + write_bytes[0] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len)))); } else if (bit_shift < store_tail_bits) { const tail_len = store_tail_bits - bit_shift; const tail = write_bytes[0] & (@as(u8, 0xfe) << (7 - tail_len)); @@ -1744,14 +1744,14 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void { return @memset(buffer, 0); } else if (@typeInfo(T).Int.bits == 8) { @memset(buffer, 0); - buffer[0] = @bitCast(u8, value); + buffer[0] = @as(u8, @bitCast(value)); return; } // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - var bits = @bitCast(uint, value); + var bits = @as(uint, @bitCast(value)); for (buffer) |*b| { - b.* = @truncate(u8, bits); + b.* = 
@as(u8, @truncate(bits)); bits >>= 8; } } @@ -1768,17 +1768,17 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void { return @memset(buffer, 0); } else if (@typeInfo(T).Int.bits == 8) { @memset(buffer, 0); - buffer[buffer.len - 1] = @bitCast(u8, value); + buffer[buffer.len - 1] = @as(u8, @bitCast(value)); return; } // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - var bits = @bitCast(uint, value); + var bits = @as(uint, @bitCast(value)); var index: usize = buffer.len; while (index != 0) { index -= 1; - buffer[index] = @truncate(u8, bits); + buffer[index] = @as(u8, @truncate(bits)); bits >>= 8; } } @@ -1822,7 +1822,7 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value const uN = std.meta.Int(.unsigned, @bitSizeOf(T)); const Log2N = std.math.Log2Int(T); - const bit_shift = @intCast(u3, bit_offset % 8); + const bit_shift = @as(u3, @intCast(bit_offset % 8)); const write_size = (bit_count + bit_shift + 7) / 8; const lowest_byte = switch (endian) { .Big => bytes.len - (bit_offset / 8) - write_size, @@ -1833,8 +1833,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value if (write_size == 1) { // Single byte writes are handled specially, since we need to mask bits // on both ends of the byte. 
- const mask = (@as(u8, 0xff) >> @intCast(u3, 8 - bit_count)); - const new_bits = @intCast(u8, @bitCast(uN, value) & mask) << bit_shift; + const mask = (@as(u8, 0xff) >> @as(u3, @intCast(8 - bit_count))); + const new_bits = @as(u8, @intCast(@as(uN, @bitCast(value)) & mask)) << bit_shift; write_bytes[0] = (write_bytes[0] & ~(mask << bit_shift)) | new_bits; return; } @@ -1843,31 +1843,31 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value // Iterate bytes forward for Little-endian, backward for Big-endian const delta: i2 = if (endian == .Big) -1 else 1; - const start = if (endian == .Big) @intCast(isize, write_bytes.len - 1) else 0; + const start = if (endian == .Big) @as(isize, @intCast(write_bytes.len - 1)) else 0; var i: isize = start; // isize for signed index arithmetic // Write first byte, using a mask to protects bits preceding bit_offset const head_mask = @as(u8, 0xff) >> bit_shift; - write_bytes[@intCast(usize, i)] &= ~(head_mask << bit_shift); - write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & head_mask) << bit_shift; - remaining >>= @intCast(Log2N, @as(u4, 8) - bit_shift); + write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift); + write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift; + remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift)); i += delta; // Write bytes[1..bytes.len - 1] if (@bitSizeOf(T) > 8) { - const loop_end = start + delta * (@intCast(isize, write_size) - 1); + const loop_end = start + delta * (@as(isize, @intCast(write_size)) - 1); while (i != loop_end) : (i += delta) { - write_bytes[@intCast(usize, i)] = @truncate(u8, @bitCast(uN, remaining)); + write_bytes[@as(usize, @intCast(i))] = @as(u8, @truncate(@as(uN, @bitCast(remaining)))); remaining >>= 8; } } // Write last byte, using a mask to protect bits following bit_offset + bit_count - const following_bits = -%@truncate(u3, bit_shift + bit_count); + const 
following_bits = -%@as(u3, @truncate(bit_shift + bit_count)); const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits; - write_bytes[@intCast(usize, i)] &= ~tail_mask; - write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & tail_mask); + write_bytes[@as(usize, @intCast(i))] &= ~tail_mask; + write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & tail_mask)); } test "writeIntBig and writeIntLittle" { @@ -3799,15 +3799,14 @@ pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize { /// type. pub fn alignPointer(ptr: anytype, align_to: usize) ?@TypeOf(ptr) { const adjust_off = alignPointerOffset(ptr, align_to) orelse return null; - const T = @TypeOf(ptr); // Avoid the use of ptrFromInt to avoid losing the pointer provenance info. - return @alignCast(@typeInfo(T).Pointer.alignment, ptr + adjust_off); + return @alignCast(ptr + adjust_off); } test "alignPointer" { const S = struct { fn checkAlign(comptime T: type, base: usize, align_to: usize, expected: usize) !void { - var ptr = @ptrFromInt(T, base); + var ptr = @as(T, @ptrFromInt(base)); var aligned = alignPointer(ptr, align_to); try testing.expectEqual(expected, @intFromPtr(aligned)); } @@ -3854,9 +3853,7 @@ fn AsBytesReturnType(comptime P: type) type { /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving pointer attributes. 
pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) { - const P = @TypeOf(ptr); - const T = AsBytesReturnType(P); - return @ptrCast(T, @alignCast(meta.alignment(T), ptr)); + return @ptrCast(@alignCast(ptr)); } test "asBytes" { @@ -3902,7 +3899,7 @@ test "asBytes" { test "asBytes preserves pointer attributes" { const inArr: u32 align(16) = 0xDEADBEEF; - const inPtr = @ptrCast(*align(16) const volatile u32, &inArr); + const inPtr = @as(*align(16) const volatile u32, @ptrCast(&inArr)); const outSlice = asBytes(inPtr); const in = @typeInfo(@TypeOf(inPtr)).Pointer; @@ -3948,7 +3945,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type { /// Given a pointer to an array of bytes, returns a pointer to a value of the specified type /// backed by those bytes, preserving pointer attributes. pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) { - return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes); + return @as(BytesAsValueReturnType(T, @TypeOf(bytes)), @ptrCast(bytes)); } test "bytesAsValue" { @@ -3993,7 +3990,7 @@ test "bytesAsValue" { test "bytesAsValue preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; - const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outPtr = bytesAsValue(u32, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4043,7 +4040,7 @@ pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, const cast_target = CopyPtrAttrs(@TypeOf(bytes), .Many, T); - return @ptrCast(cast_target, bytes)[0..@divExact(bytes.len, @sizeOf(T))]; + return @as(cast_target, @ptrCast(bytes))[0..@divExact(bytes.len, @sizeOf(T))]; } test "bytesAsSlice" { @@ -4101,7 +4098,7 @@ test "bytesAsSlice with specified alignment" { test "bytesAsSlice preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; 
- const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outSlice = bytesAsSlice(u16, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4133,7 +4130,7 @@ pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) { const cast_target = CopyPtrAttrs(Slice, .Many, u8); - return @ptrCast(cast_target, slice)[0 .. slice.len * @sizeOf(meta.Elem(Slice))]; + return @as(cast_target, @ptrCast(slice))[0 .. slice.len * @sizeOf(meta.Elem(Slice))]; } test "sliceAsBytes" { @@ -4197,7 +4194,7 @@ test "sliceAsBytes and bytesAsSlice back" { test "sliceAsBytes preserves pointer attributes" { const inArr align(16) = [2]u16{ 0xDEAD, 0xBEEF }; - const inSlice = @ptrCast(*align(16) const volatile [2]u16, &inArr)[0..]; + const inSlice = @as(*align(16) const volatile [2]u16, @ptrCast(&inArr))[0..]; const outSlice = sliceAsBytes(inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; @@ -4218,7 +4215,7 @@ pub fn alignForward(comptime T: type, addr: T, alignment: T) T { } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { - const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); + const alignment = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_alignment)); return alignForward(usize, addr, alignment); } @@ -4282,7 +4279,7 @@ pub fn doNotOptimizeAway(val: anytype) void { /// .stage2_c doesn't support asm blocks yet, so use volatile stores instead var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined; fn doNotOptimizeAwayC(ptr: anytype) void { - const dest = @ptrCast(*volatile u8, &deopt_target); + const dest = @as(*volatile u8, @ptrCast(&deopt_target)); for (asBytes(ptr)) |b| { dest.* = b; } @@ -4433,7 +4430,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali error.Overflow => return null, }; const alignment_offset = begin_address_aligned - begin_address; 
- return @alignCast(new_alignment, bytes[alignment_offset .. alignment_offset + new_length]); + return @alignCast(bytes[alignment_offset .. alignment_offset + new_length]); } /// Returns the largest sub-slice within the given slice that conforms to the new alignment, @@ -4445,7 +4442,7 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice const Element = @TypeOf(slice[0]); const slice_length_bytes = aligned_bytes.len - (aligned_bytes.len % @sizeOf(Element)); const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]); - return @alignCast(new_alignment, aligned_slice); + return @alignCast(aligned_slice); } test "read/write(Var)PackedInt" { @@ -4490,8 +4487,8 @@ test "read/write(Var)PackedInt" { for ([_]PackedType{ ~@as(PackedType, 0), // all ones: -1 iN / maxInt uN @as(PackedType, 0), // all zeros: 0 iN / 0 uN - @bitCast(PackedType, @as(iPackedType, math.maxInt(iPackedType))), // maxInt iN - @bitCast(PackedType, @as(iPackedType, math.minInt(iPackedType))), // maxInt iN + @as(PackedType, @bitCast(@as(iPackedType, math.maxInt(iPackedType)))), // maxInt iN + @as(PackedType, @bitCast(@as(iPackedType, math.minInt(iPackedType)))), // maxInt iN random.int(PackedType), // random random.int(PackedType), // random }) |write_value| { @@ -4502,11 +4499,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, native_endian); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Write writePackedInt(PackedType, asBytes(&value), offset, write_value, native_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, 
@intCast(offset))))))); // Read again const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, native_endian); @@ -4515,9 +4512,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ value; if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } { // Fixed-size Read/Write (Foreign-endian) @@ -4527,11 +4524,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Write writePackedInt(PackedType, asBytes(&value), offset, write_value, foreign_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian); @@ -4540,9 +4537,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ @byteSwap(value); if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, 
@bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } const signedness = @typeInfo(PackedType).Int.signedness; @@ -4559,11 +4556,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Write writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), native_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness); @@ -4572,9 +4569,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ value; if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } { // Variable-size Read/Write (Foreign-endian) @@ -4587,11 +4584,11 @@ test "read/write(Var)PackedInt" { // Read const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness); - try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(read_value1 
== @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Write writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), foreign_endian); - try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset)))); + try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset))))))); // Read again const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness); @@ -4600,9 +4597,9 @@ test "read/write(Var)PackedInt" { // Verify bits outside of the target integer are unmodified const diff_bits = init_value ^ @byteSwap(value); if (offset != offset_at_end) - try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0); + try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0); if (offset != 0) - try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0); + try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0); } } } diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 301480f66273..214a6443d2e3 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -101,7 +101,7 @@ pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. 
pub fn create(self: Allocator, comptime T: type) Error!*T { - if (@sizeOf(T) == 0) return @ptrFromInt(*T, math.maxInt(usize)); + if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize))); const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress()); return &slice[0]; } @@ -112,7 +112,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void { const info = @typeInfo(@TypeOf(ptr)).Pointer; const T = info.child; if (@sizeOf(T) == 0) return; - const non_const_ptr = @ptrCast([*]u8, @constCast(ptr)); + const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr))); self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress()); } @@ -209,15 +209,15 @@ pub fn allocAdvancedWithRetAddr( if (n == 0) { const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); - return @ptrFromInt([*]align(a) T, ptr)[0..0]; + return @as([*]align(a) T, @ptrFromInt(ptr))[0..0]; } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory; // TODO: https://github.com/ziglang/zig/issues/4298 @memset(byte_ptr[0..byte_count], undefined); - const byte_slice = byte_ptr[0..byte_count]; - return mem.bytesAsSlice(T, @alignCast(a, byte_slice)); + const byte_slice: []align(a) u8 = @alignCast(byte_ptr[0..byte_count]); + return mem.bytesAsSlice(T, byte_slice); } /// Requests to modify the size of an allocation. 
It is guaranteed to not move @@ -268,7 +268,7 @@ pub fn reallocAdvanced( if (new_n == 0) { self.free(old_mem); const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); - return @ptrFromInt([*]align(Slice.alignment) T, ptr)[0..0]; + return @as([*]align(Slice.alignment) T, @ptrFromInt(ptr))[0..0]; } const old_byte_slice = mem.sliceAsBytes(old_mem); @@ -276,7 +276,8 @@ pub fn reallocAdvanced( // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure if (mem.isAligned(@intFromPtr(old_byte_slice.ptr), Slice.alignment)) { if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) { - return mem.bytesAsSlice(T, @alignCast(Slice.alignment, old_byte_slice.ptr[0..byte_count])); + const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]); + return mem.bytesAsSlice(T, new_bytes); } } @@ -288,7 +289,8 @@ pub fn reallocAdvanced( @memset(old_byte_slice, undefined); self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address); - return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count])); + const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]); + return mem.bytesAsSlice(T, new_bytes); } /// Free an array allocated with `alloc`. 
To free a single item, diff --git a/lib/std/meta.zig b/lib/std/meta.zig index fedbd1a40d74..8fe0aee9fba0 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -185,18 +185,18 @@ pub fn sentinel(comptime T: type) ?Elem(T) { switch (@typeInfo(T)) { .Array => |info| { const sentinel_ptr = info.sentinel orelse return null; - return @ptrCast(*const info.child, sentinel_ptr).*; + return @as(*const info.child, @ptrCast(sentinel_ptr)).*; }, .Pointer => |info| { switch (info.size) { .Many, .Slice => { const sentinel_ptr = info.sentinel orelse return null; - return @ptrCast(*align(1) const info.child, sentinel_ptr).*; + return @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*; }, .One => switch (@typeInfo(info.child)) { .Array => |array_info| { const sentinel_ptr = array_info.sentinel orelse return null; - return @ptrCast(*align(1) const array_info.child, sentinel_ptr).*; + return @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*; }, else => {}, }, @@ -241,7 +241,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .Array = .{ .len = array_info.len, .child = array_info.child, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), .is_allowzero = info.is_allowzero, @@ -259,7 +259,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .address_space = info.address_space, .child = info.child, .is_allowzero = info.is_allowzero, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), else => {}, @@ -277,7 +277,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { .address_space = ptr_info.address_space, .child = ptr_info.child, .is_allowzero = ptr_info.is_allowzero, - .sentinel = @ptrCast(?*const anyopaque, &sentinel_val), + .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)), }, }), }, @@ -929,8 +929,8 @@ test "intToEnum with error 
return" { try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A); try testing.expect(intToEnum(E2, one) catch unreachable == E2.B); try testing.expect(intToEnum(E3, zero) catch unreachable == E3.A); - try testing.expect(intToEnum(E3, 127) catch unreachable == @enumFromInt(E3, 127)); - try testing.expect(intToEnum(E3, -128) catch unreachable == @enumFromInt(E3, -128)); + try testing.expect(intToEnum(E3, 127) catch unreachable == @as(E3, @enumFromInt(127))); + try testing.expect(intToEnum(E3, -128) catch unreachable == @as(E3, @enumFromInt(-128))); try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one)); try testing.expectError(error.InvalidEnumTag, intToEnum(E3, 128)); try testing.expectError(error.InvalidEnumTag, intToEnum(E3, -129)); @@ -943,7 +943,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa if (!enum_info.is_exhaustive) { if (std.math.cast(enum_info.tag_type, tag_int)) |tag| { - return @enumFromInt(EnumTag, tag); + return @as(EnumTag, @enumFromInt(tag)); } return error.InvalidEnumTag; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index cf37fc5adfb6..d028f8806612 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -72,7 +72,7 @@ pub fn TrailerFlags(comptime Fields: type) type { pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void { inline for (@typeInfo(Fields).Struct.fields, 0..) 
|field, i| { if (@field(fields, field.name)) |value| - self.set(p, @enumFromInt(FieldEnum, i), value); + self.set(p, @as(FieldEnum, @enumFromInt(i)), value); } } @@ -89,14 +89,14 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); - return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off)); + return @ptrCast(@alignCast(p + off)); } pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); - return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off)); + return @ptrCast(@alignCast(p + off)); } pub fn offset(self: Self, comptime field: FieldEnum) usize { diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 3d0e0bce5d38..e00fac261cbf 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -237,7 +237,7 @@ pub fn isManyItemPtr(comptime T: type) bool { test "isManyItemPtr" { const array = [_]u8{0} ** 10; - const mip = @ptrCast([*]const u8, &array[0]); + const mip = @as([*]const u8, @ptrCast(&array[0])); try testing.expect(isManyItemPtr(@TypeOf(mip))); try testing.expect(!isManyItemPtr(@TypeOf(array))); try testing.expect(!isManyItemPtr(@TypeOf(array[0..1]))); diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 26ba6cc919ae..ffbff62da21c 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -78,7 +78,7 @@ pub fn MultiArrayList(comptime T: type) type { const casted_ptr: [*]F = if (@sizeOf(F) == 0) undefined else - @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr)); + @ptrCast(@alignCast(byte_ptr)); return casted_ptr[0..self.len]; } @@ -89,14 +89,14 @@ pub fn MultiArrayList(comptime T: type) type { else => unreachable, }; inline for (fields, 0..) 
|field_info, i| { - self.items(@enumFromInt(Field, i))[index] = @field(e, field_info.name); + self.items(@as(Field, @enumFromInt(i)))[index] = @field(e, field_info.name); } } pub fn get(self: Slice, index: usize) T { var result: Elem = undefined; inline for (fields, 0..) |field_info, i| { - @field(result, field_info.name) = self.items(@enumFromInt(Field, i))[index]; + @field(result, field_info.name) = self.items(@as(Field, @enumFromInt(i)))[index]; } return switch (@typeInfo(T)) { .Struct => result, @@ -110,10 +110,9 @@ pub fn MultiArrayList(comptime T: type) type { return .{}; } const unaligned_ptr = self.ptrs[sizes.fields[0]]; - const aligned_ptr = @alignCast(@alignOf(Elem), unaligned_ptr); - const casted_ptr = @ptrCast([*]align(@alignOf(Elem)) u8, aligned_ptr); + const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr); return .{ - .bytes = casted_ptr, + .bytes = aligned_ptr, .len = self.len, .capacity = self.capacity, }; @@ -294,7 +293,7 @@ pub fn MultiArrayList(comptime T: type) type { }; const slices = self.slice(); inline for (fields, 0..) |field_info, field_index| { - const field_slice = slices.items(@enumFromInt(Field, field_index)); + const field_slice = slices.items(@as(Field, @enumFromInt(field_index))); var i: usize = self.len - 1; while (i > index) : (i -= 1) { field_slice[i] = field_slice[i - 1]; @@ -309,7 +308,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn swapRemove(self: *Self, index: usize) void { const slices = self.slice(); inline for (fields, 0..) |_, i| { - const field_slice = slices.items(@enumFromInt(Field, i)); + const field_slice = slices.items(@as(Field, @enumFromInt(i))); field_slice[index] = field_slice[self.len - 1]; field_slice[self.len - 1] = undefined; } @@ -321,7 +320,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn orderedRemove(self: *Self, index: usize) void { const slices = self.slice(); inline for (fields, 0..) 
|_, field_index| { - const field_slice = slices.items(@enumFromInt(Field, field_index)); + const field_slice = slices.items(@as(Field, @enumFromInt(field_index))); var i = index; while (i < self.len - 1) : (i += 1) { field_slice[i] = field_slice[i + 1]; @@ -358,7 +357,7 @@ pub fn MultiArrayList(comptime T: type) type { const self_slice = self.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); const dest_slice = self_slice.items(field)[new_len..]; // We use memset here for more efficient codegen in safety-checked, // valgrind-enabled builds. Otherwise the valgrind client request @@ -379,7 +378,7 @@ pub fn MultiArrayList(comptime T: type) type { const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } @@ -440,7 +439,7 @@ pub fn MultiArrayList(comptime T: type) type { const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } @@ -459,7 +458,7 @@ pub fn MultiArrayList(comptime T: type) type { const result_slice = result.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); @memcpy(result_slice.items(field), self_slice.items(field)); } } @@ -476,7 +475,7 @@ pub fn MultiArrayList(comptime T: type) type { pub fn swap(sc: @This(), a_index: usize, b_index: usize) void { inline for (fields, 0..) 
|field_info, i| { if (@sizeOf(field_info.type) != 0) { - const field = @enumFromInt(Field, i); + const field = @as(Field, @enumFromInt(i)); const ptr = sc.slice.items(field); mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]); } @@ -592,9 +591,9 @@ test "basic usage" { var i: usize = 0; while (i < 6) : (i += 1) { try list.append(ally, .{ - .a = @intCast(u32, 4 + i), + .a = @as(u32, @intCast(4 + i)), .b = "whatever", - .c = @intCast(u8, 'd' + i), + .c = @as(u8, @intCast('d' + i)), }); } @@ -791,7 +790,7 @@ test "union" { // Add 6 more things to force a capacity increase. for (0..6) |i| { - try list.append(ally, .{ .a = @intCast(u32, 4 + i) }); + try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)) }); } try testing.expectEqualSlices( diff --git a/lib/std/net.zig b/lib/std/net.zig index 0f8ecbf21e8f..af291f64147e 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -137,8 +137,8 @@ pub const Address = extern union { /// on the address family. pub fn initPosix(addr: *align(4) const os.sockaddr) Address { switch (addr.family) { - os.AF.INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr.in, addr).* } }, - os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr.in6, addr).* } }, + os.AF.INET => return Address{ .in = Ip4Address{ .sa = @as(*const os.sockaddr.in, @ptrCast(addr)).* } }, + os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @as(*const os.sockaddr.in6, @ptrCast(addr)).* } }, else => unreachable, } } @@ -165,8 +165,8 @@ pub const Address = extern union { } pub fn eql(a: Address, b: Address) bool { - const a_bytes = @ptrCast([*]const u8, &a.any)[0..a.getOsSockLen()]; - const b_bytes = @ptrCast([*]const u8, &b.any)[0..b.getOsSockLen()]; + const a_bytes = @as([*]const u8, @ptrCast(&a.any))[0..a.getOsSockLen()]; + const b_bytes = @as([*]const u8, @ptrCast(&b.any))[0..b.getOsSockLen()]; return mem.eql(u8, a_bytes, b_bytes); } @@ -187,7 +187,7 @@ pub const Address = extern union { // provide the full 
buffer size (e.g. getsockname, getpeername, recvfrom, accept). // // To access the path, std.mem.sliceTo(&address.un.path, 0) should be used. - return @intCast(os.socklen_t, @sizeOf(os.sockaddr.un)); + return @as(os.socklen_t, @intCast(@sizeOf(os.sockaddr.un))); }, else => unreachable, @@ -260,7 +260,7 @@ pub const Ip4Address = extern struct { return Ip4Address{ .sa = os.sockaddr.in{ .port = mem.nativeToBig(u16, port), - .addr = @ptrCast(*align(1) const u32, &addr).*, + .addr = @as(*align(1) const u32, @ptrCast(&addr)).*, }, }; } @@ -285,7 +285,7 @@ pub const Ip4Address = extern struct { ) !void { if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self); _ = options; - const bytes = @ptrCast(*const [4]u8, &self.sa.addr); + const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr)); try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{ bytes[0], bytes[1], @@ -354,9 +354,9 @@ pub const Ip6Address = extern struct { if (index == 14) { return error.InvalidEnd; } - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; x = 0; @@ -408,13 +408,13 @@ pub const Ip6Address = extern struct { } if (index == 14) { - ip_slice[14] = @truncate(u8, x >> 8); - ip_slice[15] = @truncate(u8, x); + ip_slice[14] = @as(u8, @truncate(x >> 8)); + ip_slice[15] = @as(u8, @truncate(x)); return result; } else { - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]); return result; @@ -473,9 +473,9 @@ pub const Ip6Address = extern struct { if (index == 14) { return error.InvalidEnd; } - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, 
@truncate(x)); index += 1; x = 0; @@ -542,13 +542,13 @@ pub const Ip6Address = extern struct { result.sa.scope_id = resolved_scope_id; if (index == 14) { - ip_slice[14] = @truncate(u8, x >> 8); - ip_slice[15] = @truncate(u8, x); + ip_slice[14] = @as(u8, @truncate(x >> 8)); + ip_slice[15] = @as(u8, @truncate(x)); return result; } else { - ip_slice[index] = @truncate(u8, x >> 8); + ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; - ip_slice[index] = @truncate(u8, x); + ip_slice[index] = @as(u8, @truncate(x)); index += 1; @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]); return result; @@ -597,7 +597,7 @@ pub const Ip6Address = extern struct { }); return; } - const big_endian_parts = @ptrCast(*align(1) const [8]u16, &self.sa.addr); + const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr)); const native_endian_parts = switch (native_endian) { .Big => big_endian_parts.*, .Little => blk: { @@ -668,7 +668,7 @@ fn if_nametoindex(name: []const u8) !u32 { // TODO investigate if this needs to be integrated with evented I/O. 
try os.ioctl_SIOCGIFINDEX(sockfd, &ifr); - return @bitCast(u32, ifr.ifru.ivalue); + return @as(u32, @bitCast(ifr.ifru.ivalue)); } if (comptime builtin.target.os.tag.isDarwin()) { @@ -682,7 +682,7 @@ fn if_nametoindex(name: []const u8) !u32 { const index = os.system.if_nametoindex(if_slice); if (index == 0) return error.InterfaceNotFound; - return @bitCast(u32, index); + return @as(u32, @bitCast(index)); } @compileError("std.net.if_nametoindex unimplemented for this OS"); @@ -804,8 +804,8 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get var first = true; while (true) { const rc = ws2_32.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res); - switch (@enumFromInt(os.windows.ws2_32.WinsockError, @intCast(u16, rc))) { - @enumFromInt(os.windows.ws2_32.WinsockError, 0) => break, + switch (@as(os.windows.ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(rc))))) { + @as(os.windows.ws2_32.WinsockError, @enumFromInt(0)) => break, .WSATRY_AGAIN => return error.TemporaryNameServerFailure, .WSANO_RECOVERY => return error.NameServerFailure, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, @@ -841,7 +841,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get var i: usize = 0; while (it) |info| : (it = info.next) { const addr = info.addr orelse continue; - result.addrs[i] = Address.initPosix(@alignCast(4, addr)); + result.addrs[i] = Address.initPosix(@alignCast(addr)); if (info.canonname) |n| { if (result.canon_name == null) { @@ -874,7 +874,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get }; var res: ?*os.addrinfo = null; switch (sys.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res)) { - @enumFromInt(sys.EAI, 0) => {}, + @as(sys.EAI, @enumFromInt(0)) => {}, .ADDRFAMILY => return error.HostLacksNetworkAddresses, .AGAIN => return error.TemporaryNameServerFailure, .BADFLAGS => unreachable, // Invalid hints @@ -908,7 +908,7 @@ pub fn getAddressList(allocator: 
mem.Allocator, name: []const u8, port: u16) Get var i: usize = 0; while (it) |info| : (it = info.next) { const addr = info.addr orelse continue; - result.addrs[i] = Address.initPosix(@alignCast(4, addr)); + result.addrs[i] = Address.initPosix(@alignCast(addr)); if (info.canonname) |n| { if (result.canon_name == null) { @@ -1020,7 +1020,7 @@ fn linuxLookupName( for (addrs.items, 0..) |*addr, i| { var key: i32 = 0; var sa6: os.sockaddr.in6 = undefined; - @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0); + @memset(@as([*]u8, @ptrCast(&sa6))[0..@sizeOf(os.sockaddr.in6)], 0); var da6 = os.sockaddr.in6{ .family = os.AF.INET6, .scope_id = addr.addr.in6.sa.scope_id, @@ -1029,7 +1029,7 @@ fn linuxLookupName( .addr = [1]u8{0} ** 16, }; var sa4: os.sockaddr.in = undefined; - @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0); + @memset(@as([*]u8, @ptrCast(&sa4))[0..@sizeOf(os.sockaddr.in)], 0); var da4 = os.sockaddr.in{ .family = os.AF.INET, .port = 65535, @@ -1042,18 +1042,18 @@ fn linuxLookupName( var dalen: os.socklen_t = undefined; if (addr.addr.any.family == os.AF.INET6) { da6.addr = addr.addr.in6.sa.addr; - da = @ptrCast(*os.sockaddr, &da6); + da = @ptrCast(&da6); dalen = @sizeOf(os.sockaddr.in6); - sa = @ptrCast(*os.sockaddr, &sa6); + sa = @ptrCast(&sa6); salen = @sizeOf(os.sockaddr.in6); } else { sa6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*; da6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*; mem.writeIntNative(u32, da6.addr[12..], addr.addr.in.sa.addr); da4.addr = addr.addr.in.sa.addr; - da = @ptrCast(*os.sockaddr, &da4); + da = @ptrCast(&da4); dalen = @sizeOf(os.sockaddr.in); - sa = @ptrCast(*os.sockaddr, &sa4); + sa = @ptrCast(&sa4); salen = @sizeOf(os.sockaddr.in); } const dpolicy = policyOf(da6.addr); @@ -1070,7 +1070,7 @@ fn linuxLookupName( os.getsockname(fd, sa, &salen) catch break :syscalls; if (addr.addr.any.family == os.AF.INET) { // TODO sa6.addr[12..16] should return *[4]u8, making 
this cast unnecessary. - mem.writeIntNative(u32, @ptrCast(*[4]u8, &sa6.addr[12]), sa4.addr); + mem.writeIntNative(u32, @as(*[4]u8, @ptrCast(&sa6.addr[12])), sa4.addr); } if (dscope == @as(i32, scopeOf(sa6.addr))) key |= DAS_MATCHINGSCOPE; if (dlabel == labelOf(sa6.addr)) key |= DAS_MATCHINGLABEL; @@ -1079,7 +1079,7 @@ fn linuxLookupName( key |= dprec << DAS_PREC_SHIFT; key |= (15 - dscope) << DAS_SCOPE_SHIFT; key |= prefixlen << DAS_PREFIX_SHIFT; - key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT; + key |= (MAXADDRS - @as(i32, @intCast(i))) << DAS_ORDER_SHIFT; addr.sortkey = key; } mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan); @@ -1171,7 +1171,7 @@ fn prefixMatch(s: [16]u8, d: [16]u8) u8 { // address. However the definition of the source prefix length is // not clear and thus this limiting is not yet implemented. var i: u8 = 0; - while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @intCast(u3, i % 8))) == 0) : (i += 1) {} + while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {} return i; } @@ -1577,7 +1577,7 @@ fn resMSendRc( // Get local address and open/bind a socket var sa: Address = undefined; - @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0); + @memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0); sa.any.family = family; try os.bind(fd, &sa.any, sl); @@ -1588,13 +1588,13 @@ fn resMSendRc( }}; const retry_interval = timeout / attempts; var next: u32 = 0; - var t2: u64 = @bitCast(u64, std.time.milliTimestamp()); + var t2: u64 = @as(u64, @bitCast(std.time.milliTimestamp())); var t0 = t2; var t1 = t2 - retry_interval; var servfail_retry: usize = undefined; - outer: while (t2 - t0 < timeout) : (t2 = @bitCast(u64, std.time.milliTimestamp())) { + outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) { if (t2 - t1 >= retry_interval) { // Query all configured nameservers in parallel var i: usize = 0; diff --git a/lib/std/os.zig b/lib/std/os.zig 
index 872aeef611a7..2c49bd9f499f 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -494,7 +494,7 @@ pub fn getrandom(buffer: []u8) GetRandomError!void { const res = if (use_c) blk: { const rc = std.c.getrandom(buf.ptr, buf.len, 0); break :blk .{ - .num_read = @bitCast(usize, rc), + .num_read = @as(usize, @bitCast(rc)), .err = std.c.getErrno(rc), }; } else blk: { @@ -608,7 +608,7 @@ pub fn abort() noreturn { sigprocmask(SIG.UNBLOCK, &sigabrtmask, null); // Beyond this point should be unreachable. - @ptrFromInt(*allowzero volatile u8, 0).* = 0; + @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0; raise(SIG.KILL) catch {}; exit(127); // Pid 1 might not be signalled in some containers. } @@ -678,10 +678,10 @@ pub fn exit(status: u8) noreturn { // exit() is only available if exitBootServices() has not been called yet. // This call to exit should not fail, so we don't care about its return value. if (uefi.system_table.boot_services) |bs| { - _ = bs.exit(uefi.handle, @enumFromInt(uefi.Status, status), 0, null); + _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null); } // If we can't exit, reboot the system instead. 
- uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @enumFromInt(uefi.Status, status), 0, null); + uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null); } system.exit(status); } @@ -759,7 +759,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize { while (true) { const rc = system.read(fd, buf.ptr, adjusted_len); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -818,7 +818,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize { // TODO handle the case when iov_len is too large and get rid of this @intCast const rc = system.readv(fd, iov.ptr, iov_count); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -892,11 +892,11 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize { const pread_sym = if (lfs64_abi) system.pread64 else system.pread; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -929,7 +929,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { if (builtin.os.tag == .windows) { var io_status_block: windows.IO_STATUS_BLOCK = undefined; var eof_info = windows.FILE_END_OF_FILE_INFORMATION{ - .EndOfFile = @bitCast(windows.LARGE_INTEGER, length), + .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)), }; const rc = windows.ntdll.NtSetInformationFile( @@ -965,7 +965,7 @@ pub fn ftruncate(fd: fd_t, length: u64) 
TruncateError!void { while (true) { const ftruncate_sym = if (lfs64_abi) system.ftruncate64 else system.ftruncate; - const ilen = @bitCast(i64, length); // the OS treats this as unsigned + const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned switch (errno(ftruncate_sym(fd, ilen))) { .SUCCESS => return, .INTR => continue, @@ -1001,7 +1001,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { if (have_pread_but_not_preadv) { // We could loop here; but proper usage of `preadv` must handle partial reads anyway. // So we simply read into the first vector only. - if (iov.len == 0) return @intCast(usize, 0); + if (iov.len == 0) return @as(usize, @intCast(0)); const first = iov[0]; return pread(fd, first.iov_base[0..first.iov_len], offset); } @@ -1030,11 +1030,11 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { const preadv_sym = if (lfs64_abi) system.preadv64 else system.preadv; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { - .SUCCESS => return @bitCast(usize, rc), + .SUCCESS => return @as(usize, @bitCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, @@ -1143,7 +1143,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize { while (true) { const rc = system.write(fd, bytes.ptr, adjusted_len); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1212,11 +1212,11 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize { } } - const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len); + const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); while (true) { const rc = 
system.writev(fd, iov.ptr, iov_count); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1304,11 +1304,11 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize { const pwrite_sym = if (lfs64_abi) system.pwrite64 else system.pwrite; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1390,12 +1390,12 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz const pwritev_sym = if (lfs64_abi) system.pwritev64 else system.pwritev; - const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len); - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, @@ -1504,7 +1504,7 @@ pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t while (true) { const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, @@ -1653,11 +1653,11 @@ fn openOptionsFromFlagsWasi(fd: fd_t, oflag: u32) OpenError!WasiOpenOptions { 
rights &= fsb_cur.fs_rights_inheriting; return WasiOpenOptions{ - .oflags = @truncate(w.oflags_t, (oflag >> 12)) & 0xfff, + .oflags = @as(w.oflags_t, @truncate((oflag >> 12))) & 0xfff, .lookup_flags = if (oflag & O.NOFOLLOW == 0) w.LOOKUP_SYMLINK_FOLLOW else 0, .fs_rights_base = rights, .fs_rights_inheriting = fsb_cur.fs_rights_inheriting, - .fs_flags = @truncate(w.fdflags_t, oflag & 0xfff), + .fs_flags = @as(w.fdflags_t, @truncate(oflag & 0xfff)), }; } @@ -1717,7 +1717,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) while (true) { const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, @@ -1765,7 +1765,7 @@ pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) pub fn dup(old_fd: fd_t) !fd_t { const rc = system.dup(old_fd); return switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .MFILE => error.ProcessFdQuotaExceeded, .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), @@ -2024,7 +2024,7 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 { const err = if (builtin.link_libc) blk: { const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*; - break :blk @enumFromInt(E, c_err); + break :blk @as(E, @enumFromInt(c_err)); } else blk: { break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len)); }; @@ -2661,12 +2661,12 @@ pub fn renameatW( const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2; if (struct_len > struct_buf_len) return error.NameTooLong; - const rename_info = @ptrCast(*windows.FILE_RENAME_INFORMATION, &rename_info_buf); + const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf)); rename_info.* = .{ .ReplaceIfExists = ReplaceIfExists, .RootDirectory = if 
(std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd, - .FileNameLength = @intCast(u32, new_path_w.len * 2), // already checked error.NameTooLong + .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong .FileName = undefined, }; @memcpy(@as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w); @@ -2677,7 +2677,7 @@ pub fn renameatW( src_fd, &io_status_block, rename_info, - @intCast(u32, struct_len), // already checked for error.NameTooLong + @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong .FileRenameInformation, ); @@ -3049,7 +3049,7 @@ pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 } const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { - .SUCCESS => return out_buffer[0..@bitCast(usize, rc)], + .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => return error.NotLink, @@ -3115,7 +3115,7 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read } const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { - .SUCCESS => return out_buffer[0..@bitCast(usize, rc)], + .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => return error.NotLink, @@ -3227,7 +3227,7 @@ pub fn isatty(handle: fd_t) bool { if (builtin.os.tag == .linux) { while (true) { var wsz: linux.winsize = undefined; - const fd = @bitCast(usize, @as(isize, handle)); + const fd = @as(usize, @bitCast(@as(isize, handle))); const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz)); switch (linux.getErrno(rc)) { .SUCCESS => return true, @@ -3271,14 +3271,14 @@ pub fn isCygwinPty(handle: fd_t) bool { var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes); var 
io_status_block: windows.IO_STATUS_BLOCK = undefined; - const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(u32, name_info_bytes.len), .FileNameInformation); + const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @as(u32, @intCast(name_info_bytes.len)), .FileNameInformation); switch (rc) { .SUCCESS => {}, .INVALID_PARAMETER => unreachable, else => return false, } - const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]); + const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0])); const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + @as(usize, name_info.FileNameLength)]; const name_wide = mem.bytesAsSlice(u16, name_bytes); // Note: The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master @@ -3325,9 +3325,9 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t else 0; const rc = try windows.WSASocketW( - @bitCast(i32, domain), - @bitCast(i32, filtered_sock_type), - @bitCast(i32, protocol), + @as(i32, @bitCast(domain)), + @as(i32, @bitCast(filtered_sock_type)), + @as(i32, @bitCast(protocol)), null, 0, flags, @@ -3353,7 +3353,7 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t const rc = system.socket(domain, filtered_sock_type, protocol); switch (errno(rc)) { .SUCCESS => { - const fd = @intCast(fd_t, rc); + const fd = @as(fd_t, @intCast(rc)); if (!have_sock_flags) { try setSockFlags(fd, socket_type); } @@ -3679,7 +3679,7 @@ pub fn accept( } else { switch (errno(rc)) { .SUCCESS => { - break @intCast(socket_t, rc); + break @as(socket_t, @intCast(rc)); }, .INTR => continue, .AGAIN => return error.WouldBlock, @@ -3723,7 +3723,7 @@ pub const EpollCreateError = error{ pub fn epoll_create1(flags: u32) EpollCreateError!i32 { const rc = system.epoll_create1(flags); switch (errno(rc)) { - .SUCCESS 
=> return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, @@ -3782,9 +3782,9 @@ pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollC pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize { while (true) { // TODO get rid of the @intCast - const rc = system.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout); + const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .BADF => unreachable, .FAULT => unreachable, @@ -3803,7 +3803,7 @@ pub const EventFdError = error{ pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 { const rc = system.eventfd(initval, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, // invalid parameters @@ -3937,7 +3937,7 @@ pub const ConnectError = error{ /// return error.WouldBlock when EAGAIN or EINPROGRESS is received. 
pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void { if (builtin.os.tag == .windows) { - const rc = windows.ws2_32.connect(sock, sock_addr, @intCast(i32, len)); + const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len))); if (rc == 0) return; switch (windows.ws2_32.WSAGetLastError()) { .WSAEADDRINUSE => return error.AddressInUse, @@ -3992,10 +3992,10 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne pub fn getsockoptError(sockfd: fd_t) ConnectError!void { var err_code: i32 = undefined; var size: u32 = @sizeOf(u32); - const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @ptrCast([*]u8, &err_code), &size); + const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size); assert(size == 4); switch (errno(rc)) { - .SUCCESS => switch (@enumFromInt(E, err_code)) { + .SUCCESS => switch (@as(E, @enumFromInt(err_code))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .PERM => return error.PermissionDenied, @@ -4035,13 +4035,13 @@ pub const WaitPidResult = struct { pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult { const Status = if (builtin.link_libc) c_int else u32; var status: Status = undefined; - const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags; + const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags; while (true) { const rc = system.waitpid(pid, &status, coerced_flags); switch (errno(rc)) { .SUCCESS => return .{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, @intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error. 
@@ -4054,13 +4054,13 @@ pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult { pub fn wait4(pid: pid_t, flags: u32, ru: ?*rusage) WaitPidResult { const Status = if (builtin.link_libc) c_int else u32; var status: Status = undefined; - const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags; + const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags; while (true) { const rc = system.wait4(pid, &status, coerced_flags, ru); switch (errno(rc)) { .SUCCESS => return .{ - .pid = @intCast(pid_t, rc), - .status = @bitCast(u32, status), + .pid = @as(pid_t, @intCast(rc)), + .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error. @@ -4182,7 +4182,7 @@ pub const KQueueError = error{ pub fn kqueue() KQueueError!i32 { const rc = system.kqueue(); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, else => |err| return unexpectedErrno(err), @@ -4223,7 +4223,7 @@ pub fn kevent( timeout, ); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .FAULT => unreachable, .BADF => unreachable, // Always a race condition. 
@@ -4247,7 +4247,7 @@ pub const INotifyInitError = error{ pub fn inotify_init1(flags: u32) INotifyInitError!i32 { const rc = system.inotify_init1(flags); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, @@ -4276,7 +4276,7 @@ pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INoti pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 { const rc = system.inotify_add_watch(inotify_fd, pathname, mask); switch (errno(rc)) { - .SUCCESS => return @intCast(i32, rc), + .SUCCESS => return @as(i32, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => unreachable, .FAULT => unreachable, @@ -4319,7 +4319,7 @@ pub const MProtectError = error{ pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void { assert(mem.isAligned(memory.len, mem.page_size)); if (builtin.os.tag == .windows) { - const win_prot: windows.DWORD = switch (@truncate(u3, protection)) { + const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) { 0b000 => windows.PAGE_NOACCESS, 0b001 => windows.PAGE_READONLY, 0b010 => unreachable, // +w -r not allowed @@ -4350,7 +4350,7 @@ pub const ForkError = error{SystemResources} || UnexpectedError; pub fn fork() ForkError!pid_t { const rc = system.fork(); switch (errno(rc)) { - .SUCCESS => return @intCast(pid_t, rc), + .SUCCESS => return @as(pid_t, @intCast(rc)), .AGAIN => return error.SystemResources, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), @@ -4391,14 +4391,14 @@ pub fn mmap( ) MMapError![]align(mem.page_size) u8 { const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned const 
rc = mmap_sym(ptr, length, prot, flags, fd, ioffset); const err = if (builtin.link_libc) blk: { - if (rc != std.c.MAP.FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length]; - break :blk @enumFromInt(E, system._errno().*); + if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length]; + break :blk @as(E, @enumFromInt(system._errno().*)); } else blk: { const err = errno(rc); - if (err == .SUCCESS) return @ptrFromInt([*]align(mem.page_size) u8, rc)[0..length]; + if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length]; break :blk err; }; switch (err) { @@ -4781,7 +4781,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; - switch (wasi.fd_seek(fd, @bitCast(wasi.filedelta_t, offset), .SET, &new_offset)) { + switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4795,7 +4795,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ -4811,7 +4811,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; - switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.CUR))) { + switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) { .SUCCESS => return, .BADF => 
unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4839,7 +4839,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { } const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ -4855,7 +4855,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; - switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.END))) { + switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, @@ -4883,7 +4883,7 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { } const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek; - const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition @@ -4929,7 +4929,7 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 { const rc = lseek_sym(fd, 0, SEEK.CUR); switch (errno(rc)) { - .SUCCESS => return @bitCast(u64, rc), + .SUCCESS => return @as(u64, @bitCast(rc)), .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, @@ -4952,7 +4952,7 @@ pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize { while (true) { const rc = system.fcntl(fd, cmd, arg); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return 
@as(usize, @intCast(rc)), .INTR => continue, .AGAIN, .ACCES => return error.Locked, .BADF => unreachable, @@ -5122,7 +5122,7 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP return getFdPath(fd, out_buffer); } - const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@enumFromInt(E, std.c._errno().*)) { + const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) { .SUCCESS => unreachable, .INVAL => unreachable, .BADF => unreachable, @@ -5269,7 +5269,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { }; var i: usize = 0; while (i < len) { - const kf: *align(1) system.kinfo_file = @ptrCast(*align(1) system.kinfo_file, &buf[i]); + const kf: *align(1) system.kinfo_file = @as(*align(1) system.kinfo_file, @ptrCast(&buf[i])); if (kf.fd == fd) { len = mem.indexOfScalar(u8, &kf.path, 0) orelse MAX_PATH_BYTES; if (len == 0) return error.NameTooLong; @@ -5277,7 +5277,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { @memcpy(result, kf.path[0..len]); return result; } - i += @intCast(usize, kf.structsize); + i += @as(usize, @intCast(kf.structsize)); } return error.InvalidHandle; } @@ -5357,22 +5357,22 @@ pub fn dl_iterate_phdr( if (builtin.link_libc) { switch (system.dl_iterate_phdr(struct { fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int { - const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data)); + const context_ptr: *const Context = @ptrCast(@alignCast(data)); callback(info, size, context_ptr.*) catch |err| return @intFromError(err); return 0; } - }.callbackC, @ptrFromInt(?*anyopaque, @intFromPtr(&context)))) { + }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) { 0 => return, - else => |err| return @errSetCast(Error, @errorFromInt(@intCast(u16, err))), // TODO don't hardcode u16 + else => |err| return @as(Error, 
@errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16 } } const elf_base = std.process.getBaseAddress(); - const ehdr = @ptrFromInt(*elf.Ehdr, elf_base); + const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base)); // Make sure the base address points to an ELF image. assert(mem.eql(u8, ehdr.e_ident[0..4], elf.MAGIC)); const n_phdr = ehdr.e_phnum; - const phdrs = (@ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff))[0..n_phdr]; + const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr]; var it = dl.linkmap_iterator(phdrs) catch unreachable; @@ -5406,12 +5406,12 @@ pub fn dl_iterate_phdr( var dlpi_phnum: u16 = undefined; if (entry.l_addr != 0) { - const elf_header = @ptrFromInt(*elf.Ehdr, entry.l_addr); - dlpi_phdr = @ptrFromInt([*]elf.Phdr, entry.l_addr + elf_header.e_phoff); + const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr)); + dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff)); dlpi_phnum = elf_header.e_phnum; } else { // This is the running ELF image - dlpi_phdr = @ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff); + dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)); dlpi_phnum = ehdr.e_phnum; } @@ -5433,11 +5433,11 @@ pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError; pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { var ts: timestamp_t = undefined; - switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) { + switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) { .SUCCESS => { tp.* = .{ - .tv_sec = @intCast(i64, ts / std.time.ns_per_s), - .tv_nsec = @intCast(isize, ts % std.time.ns_per_s), + .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)), + .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)), }; }, .INVAL => return error.UnsupportedClock, @@ -5453,8 +5453,8 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { 
const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; const ft_per_s = std.time.ns_per_s / 100; tp.* = .{ - .tv_sec = @intCast(i64, ft64 / ft_per_s) + std.time.epoch.windows, - .tv_nsec = @intCast(c_long, ft64 % ft_per_s) * 100, + .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows, + .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100, }; return; } else { @@ -5474,10 +5474,10 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { var ts: timestamp_t = undefined; - switch (system.clock_res_get(@bitCast(u32, clk_id), &ts)) { + switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) { .SUCCESS => res.* = .{ - .tv_sec = @intCast(i64, ts / std.time.ns_per_s), - .tv_nsec = @intCast(isize, ts % std.time.ns_per_s), + .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)), + .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)), }, .INVAL => return error.UnsupportedClock, else => |err| return unexpectedErrno(err), @@ -5747,7 +5747,7 @@ pub fn res_mkquery( // TODO determine the circumstances for this and whether or // not this should be an error. 
if (j - i - 1 > 62) unreachable; - q[i - 1] = @intCast(u8, j - i); + q[i - 1] = @as(u8, @intCast(j - i)); } q[i + 1] = ty; q[i + 3] = class; @@ -5756,10 +5756,10 @@ pub fn res_mkquery( var ts: timespec = undefined; clock_gettime(CLOCK.REALTIME, &ts) catch {}; const UInt = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(ts.tv_nsec))); - const unsec = @bitCast(UInt, ts.tv_nsec); - const id = @truncate(u32, unsec + unsec / 65536); - q[0] = @truncate(u8, id / 256); - q[1] = @truncate(u8, id); + const unsec = @as(UInt, @bitCast(ts.tv_nsec)); + const id = @as(u32, @truncate(unsec + unsec / 65536)); + q[0] = @as(u8, @truncate(id / 256)); + q[1] = @as(u8, @truncate(id)); @memcpy(buf[0..n], q[0..n]); return n; @@ -5865,11 +5865,11 @@ pub fn sendmsg( else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, @@ -5965,13 +5965,13 @@ pub fn sendto( .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), }, - else => |rc| return @intCast(usize, rc), + else => |rc| return @as(usize, @intCast(rc)), } } while (true) { const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, @@ -6125,16 +6125,16 @@ pub fn sendfile( // Here we match BSD behavior, making a zero count value send as many bytes as possible. 
const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count)); // TODO we should not need this cast; improve return type of @min - const adjusted_count = @intCast(usize, adjusted_count_tmp); + const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile; while (true) { - var offset: off_t = @bitCast(off_t, in_offset); + var offset: off_t = @as(off_t, @bitCast(in_offset)); const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { .SUCCESS => { - const amt = @bitCast(usize, rc); + const amt = @as(usize, @bitCast(rc)); total_written += amt; if (in_len == 0 and amt == 0) { // We have detected EOF from `in_fd`. @@ -6209,9 +6209,9 @@ pub fn sendfile( while (true) { var sbytes: off_t = undefined; - const offset = @bitCast(off_t, in_offset); + const offset = @as(off_t, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags)); - const amt = @bitCast(usize, sbytes); + const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, @@ -6286,13 +6286,13 @@ pub fn sendfile( const adjusted_count_temporary = @min(in_len, @as(u63, max_count)); // TODO we should not need this int cast; improve the return type of `@min` - const adjusted_count = @intCast(u63, adjusted_count_temporary); + const adjusted_count = @as(u63, @intCast(adjusted_count_temporary)); while (true) { var sbytes: off_t = adjusted_count; - const signed_offset = @bitCast(i64, in_offset); + const signed_offset = @as(i64, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags)); - const amt = @bitCast(usize, sbytes); + const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, @@ -6342,7 +6342,7 @@ pub fn sendfile( // Here we match BSD behavior, making a zero count value send as many bytes as possible. 
const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len); // TODO we should not need this cast; improve return type of @min - const adjusted_count = @intCast(usize, adjusted_count_tmp); + const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset); if (amt_read == 0) { if (in_len == 0) { @@ -6413,14 +6413,14 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok) and has_copy_file_range_syscall.load(.Monotonic))) { - var off_in_copy = @bitCast(i64, off_in); - var off_out_copy = @bitCast(i64, off_out); + var off_in_copy = @as(i64, @bitCast(off_in)); + var off_out_copy = @as(i64, @bitCast(off_out)); while (true) { const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags); if (builtin.os.tag == .freebsd) { switch (system.getErrno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, @@ -6433,7 +6433,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len } } else { // assume linux switch (system.getErrno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, @@ -6486,11 +6486,11 @@ pub fn poll(fds: []pollfd, timeout: i32) PollError!usize { else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => continue, .INVAL => unreachable, @@ -6520,7 +6520,7 @@ pub fn ppoll(fds: []pollfd, timeout: 
?*const timespec, mask: ?*const sigset_t) P const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources; const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask); switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => return error.SignalInterrupt, .INVAL => unreachable, @@ -6585,11 +6585,11 @@ pub fn recvfrom( else => |err| return windows.unexpectedWSAError(err), } } else { - return @intCast(usize, rc); + return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { - .SUCCESS => return @intCast(usize, rc), + .SUCCESS => return @as(usize, @intCast(rc)), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, @@ -6681,7 +6681,7 @@ pub const SetSockOptError = error{ /// Set a socket's options. pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void { if (builtin.os.tag == .windows) { - const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len)); + const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len))); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, @@ -6694,7 +6694,7 @@ pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSo } return; } else { - switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) { + switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) { .SUCCESS => {}, .BADF => unreachable, // always a race condition .NOTSOCK => unreachable, // always a race condition @@ -6731,7 +6731,7 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t { const getErrno = if (use_c) std.c.getErrno else linux.getErrno; const rc = 
sys.memfd_create(name, flags); switch (getErrno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .FAULT => unreachable, // name has invalid memory .INVAL => unreachable, // name/flags are faulty .NFILE => return error.SystemFdQuotaExceeded, @@ -6881,7 +6881,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void { pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t { const rc = system.signalfd(fd, mask, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .BADF, .INVAL => unreachable, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, @@ -6989,7 +6989,7 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 { const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]); switch (errno(rc)) { - .SUCCESS => return @intCast(u31, rc), + .SUCCESS => return @as(u31, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => return error.InvalidFileDescriptor, .FAULT => return error.InvalidAddress, @@ -7170,7 +7170,7 @@ pub fn perf_event_open( ) PerfEventOpenError!fd_t { const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .@"2BIG" => return error.TooBig, .ACCES => return error.PermissionDenied, .BADF => unreachable, // group_fd file descriptor is not valid. 
@@ -7205,7 +7205,7 @@ pub const TimerFdSetError = TimerFdGetError || error{Canceled}; pub fn timerfd_create(clokid: i32, flags: u32) TimerFdCreateError!fd_t { var rc = linux.timerfd_create(clokid, flags); return switch (errno(rc)) { - .SUCCESS => @intCast(fd_t, rc), + .SUCCESS => @as(fd_t, @intCast(rc)), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, @@ -7267,7 +7267,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError! .macos, .ios, .tvos, .watchos => switch (errno(darwin.ptrace( math.cast(i32, request) orelse return error.Overflow, pid, - @ptrFromInt(?[*]u8, addr), + @as(?[*]u8, @ptrFromInt(addr)), math.cast(i32, signal) orelse return error.Overflow, ))) { .SUCCESS => {}, diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index b7ec29383b46..6362e9ece1a4 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -175,62 +175,62 @@ const require_aligned_register_pair = // Split a 64bit value into a {LSB,MSB} pair. // The LE/BE variants specify the endianness to assume. 
fn splitValueLE64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); return [2]u32{ - @truncate(u32, u), - @truncate(u32, u >> 32), + @as(u32, @truncate(u)), + @as(u32, @truncate(u >> 32)), }; } fn splitValueBE64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); return [2]u32{ - @truncate(u32, u >> 32), - @truncate(u32, u), + @as(u32, @truncate(u >> 32)), + @as(u32, @truncate(u)), }; } fn splitValue64(val: i64) [2]u32 { - const u = @bitCast(u64, val); + const u = @as(u64, @bitCast(val)); switch (native_endian) { .Little => return [2]u32{ - @truncate(u32, u), - @truncate(u32, u >> 32), + @as(u32, @truncate(u)), + @as(u32, @truncate(u >> 32)), }, .Big => return [2]u32{ - @truncate(u32, u >> 32), - @truncate(u32, u), + @as(u32, @truncate(u >> 32)), + @as(u32, @truncate(u)), }, } } /// Get the errno from a syscall return value, or 0 for no error. pub fn getErrno(r: usize) E { - const signed_r = @bitCast(isize, r); + const signed_r = @as(isize, @bitCast(r)); const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0; - return @enumFromInt(E, int); + return @as(E, @enumFromInt(int)); } pub fn dup(old: i32) usize { - return syscall1(.dup, @bitCast(usize, @as(isize, old))); + return syscall1(.dup, @as(usize, @bitCast(@as(isize, old)))); } pub fn dup2(old: i32, new: i32) usize { if (@hasField(SYS, "dup2")) { - return syscall2(.dup2, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new))); + return syscall2(.dup2, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new)))); } else { if (old == new) { if (std.debug.runtime_safety) { - const rc = syscall2(.fcntl, @bitCast(usize, @as(isize, old)), F.GETFD); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall2(.fcntl, @as(usize, @bitCast(@as(isize, old))), F.GETFD); + if (@as(isize, @bitCast(rc)) < 0) return rc; } - return @intCast(usize, old); + return @as(usize, @intCast(old)); } else { - return 
syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), 0); + return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), 0); } } } pub fn dup3(old: i32, new: i32, flags: u32) usize { - return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), flags); + return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), flags); } pub fn chdir(path: [*:0]const u8) usize { @@ -238,7 +238,7 @@ pub fn chdir(path: [*:0]const u8) usize { } pub fn fchdir(fd: fd_t) usize { - return syscall1(.fchdir, @bitCast(usize, @as(isize, fd))); + return syscall1(.fchdir, @as(usize, @bitCast(@as(isize, fd)))); } pub fn chroot(path: [*:0]const u8) usize { @@ -273,7 +273,7 @@ pub fn futimens(fd: i32, times: *const [2]timespec) usize { } pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, flags: u32) usize { - return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(times), flags); + return syscall4(.utimensat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(times), flags); } pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { @@ -282,8 +282,8 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { const length_halves = splitValue64(length); return syscall6( .fallocate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, @as(isize, mode)), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(@as(isize, mode))), offset_halves[0], offset_halves[1], length_halves[0], @@ -292,20 +292,20 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize { } else { return syscall4( .fallocate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, @as(isize, mode)), - @bitCast(u64, offset), - @bitCast(u64, length), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(@as(isize, mode))), + @as(u64, @bitCast(offset)), + 
@as(u64, @bitCast(length)), ); } } pub fn futex_wait(uaddr: *const i32, futex_op: u32, val: i32, timeout: ?*const timespec) usize { - return syscall4(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val), @intFromPtr(timeout)); + return syscall4(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)), @intFromPtr(timeout)); } pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize { - return syscall3(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val)); + return syscall3(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val))); } pub fn getcwd(buf: [*]u8, size: usize) usize { @@ -315,7 +315,7 @@ pub fn getcwd(buf: [*]u8, size: usize) usize { pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { return syscall3( .getdents, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(dirp), @min(len, maxInt(c_int)), ); @@ -324,7 +324,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { return syscall3( .getdents64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(dirp), @min(len, maxInt(c_int)), ); @@ -335,35 +335,35 @@ pub fn inotify_init1(flags: u32) usize { } pub fn inotify_add_watch(fd: i32, pathname: [*:0]const u8, mask: u32) usize { - return syscall3(.inotify_add_watch, @bitCast(usize, @as(isize, fd)), @intFromPtr(pathname), mask); + return syscall3(.inotify_add_watch, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(pathname), mask); } pub fn inotify_rm_watch(fd: i32, wd: i32) usize { - return syscall2(.inotify_rm_watch, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, wd))); + return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd)))); } pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { if (@hasField(SYS, "readlink")) { return syscall3(.readlink, @intFromPtr(path), @intFromPtr(buf_ptr), 
buf_len); } else { - return syscall4(.readlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); + return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); } } pub fn readlinkat(dirfd: i32, noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { - return syscall4(.readlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); + return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len); } pub fn mkdir(path: [*:0]const u8, mode: u32) usize { if (@hasField(SYS, "mkdir")) { return syscall2(.mkdir, @intFromPtr(path), mode); } else { - return syscall3(.mkdirat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode); + return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode); } } pub fn mkdirat(dirfd: i32, path: [*:0]const u8, mode: u32) usize { - return syscall3(.mkdirat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode); + return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode); } pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize { @@ -375,7 +375,7 @@ pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize { } pub fn mknodat(dirfd: i32, path: [*:0]const u8, mode: u32, dev: u32) usize { - return syscall4(.mknodat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, dev); + return syscall4(.mknodat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, dev); } pub fn mount(special: [*:0]const u8, dir: [*:0]const u8, fstype: ?[*:0]const u8, flags: u32, data: usize) usize { @@ -394,7 +394,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of if (@hasField(SYS, "mmap2")) { // Make sure the offset is also specified in multiples of page size if ((offset & (MMAP2_UNIT - 1)) != 0) - return 
@bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))); + return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))); return syscall6( .mmap2, @@ -402,8 +402,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of length, prot, flags, - @bitCast(usize, @as(isize, fd)), - @truncate(usize, @bitCast(u64, offset) / MMAP2_UNIT), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @truncate(@as(u64, @bitCast(offset)) / MMAP2_UNIT)), ); } else { return syscall6( @@ -412,8 +412,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of length, prot, flags, - @bitCast(usize, @as(isize, fd)), - @bitCast(u64, offset), + @as(usize, @bitCast(@as(isize, fd))), + @as(u64, @bitCast(offset)), ); } } @@ -429,7 +429,7 @@ pub const MSF = struct { }; pub fn msync(address: [*]const u8, length: usize, flags: i32) usize { - return syscall3(.msync, @intFromPtr(address), length, @bitCast(u32, flags)); + return syscall3(.msync, @intFromPtr(address), length, @as(u32, @bitCast(flags))); } pub fn munmap(address: [*]const u8, length: usize) usize { @@ -438,7 +438,7 @@ pub fn munmap(address: [*]const u8, length: usize) usize { pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize { if (@hasField(SYS, "poll")) { - return syscall3(.poll, @intFromPtr(fds), n, @bitCast(u32, timeout)); + return syscall3(.poll, @intFromPtr(fds), n, @as(u32, @bitCast(timeout))); } else { return syscall5( .ppoll, @@ -462,69 +462,69 @@ pub fn ppoll(fds: [*]pollfd, n: nfds_t, timeout: ?*timespec, sigmask: ?*const si } pub fn read(fd: i32, buf: [*]u8, count: usize) usize { - return syscall3(.read, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count); + return syscall3(.read, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count); } pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall5( .preadv, - @bitCast(usize, 
@as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // Kernel expects the offset is split into largest natural word-size. // See following link for detail: // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=601cc11d054ae4b5e9b5babec3d8e4667a2cb9b5 - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, ); } pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall6( .preadv2, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, flags, ); } pub fn readv(fd: i32, iov: [*]const iovec, count: usize) usize { - return syscall3(.readv, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count); + return syscall3(.readv, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count); } pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize { - return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count); + return syscall3(.writev, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count); } pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall5( .pwritev, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, 
@truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, ); } pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize { - const offset_u = @bitCast(u64, offset); + const offset_u = @as(u64, @bitCast(offset)); return syscall6( .pwritev2, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count, // See comments in preadv - @truncate(usize, offset_u), - if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0, + @as(usize, @truncate(offset_u)), + if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0, flags, ); } @@ -533,7 +533,7 @@ pub fn rmdir(path: [*:0]const u8) usize { if (@hasField(SYS, "rmdir")) { return syscall1(.rmdir, @intFromPtr(path)); } else { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), AT.REMOVEDIR); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), AT.REMOVEDIR); } } @@ -541,12 +541,12 @@ pub fn symlink(existing: [*:0]const u8, new: [*:0]const u8) usize { if (@hasField(SYS, "symlink")) { return syscall2(.symlink, @intFromPtr(existing), @intFromPtr(new)); } else { - return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new)); + return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new)); } } pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) usize { - return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, newfd)), @intFromPtr(newpath)); + return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath)); } pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { @@ -555,7 +555,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { if (require_aligned_register_pair) { return syscall6( .pread64, - 
@bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, 0, @@ -565,7 +565,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { } else { return syscall5( .pread64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, offset_halves[0], @@ -580,10 +580,10 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize { .pread; return syscall4( syscall_number, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, - @bitCast(u64, offset), + @as(u64, @bitCast(offset)), ); } } @@ -592,12 +592,12 @@ pub fn access(path: [*:0]const u8, mode: u32) usize { if (@hasField(SYS, "access")) { return syscall2(.access, @intFromPtr(path), mode); } else { - return syscall4(.faccessat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode, 0); + return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0); } } pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize { - return syscall4(.faccessat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, flags); + return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, flags); } pub fn pipe(fd: *[2]i32) usize { @@ -615,7 +615,7 @@ pub fn pipe2(fd: *[2]i32, flags: u32) usize { } pub fn write(fd: i32, buf: [*]const u8, count: usize) usize { - return syscall3(.write, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count); + return syscall3(.write, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count); } pub fn ftruncate(fd: i32, length: i64) usize { @@ -624,7 +624,7 @@ pub fn ftruncate(fd: i32, length: i64) usize { if (require_aligned_register_pair) { return syscall4( .ftruncate64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), 0, length_halves[0], length_halves[1], @@ -632,7 +632,7 @@ pub fn ftruncate(fd: i32, length: i64) usize { 
} else { return syscall3( .ftruncate64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), length_halves[0], length_halves[1], ); @@ -640,8 +640,8 @@ pub fn ftruncate(fd: i32, length: i64) usize { } else { return syscall2( .ftruncate, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, length), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(length)), ); } } @@ -653,7 +653,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { if (require_aligned_register_pair) { return syscall6( .pwrite64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, 0, @@ -663,7 +663,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { } else { return syscall5( .pwrite64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, offset_halves[0], @@ -678,10 +678,10 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize { .pwrite; return syscall4( syscall_number, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count, - @bitCast(u64, offset), + @as(u64, @bitCast(offset)), ); } } @@ -690,9 +690,9 @@ pub fn rename(old: [*:0]const u8, new: [*:0]const u8) usize { if (@hasField(SYS, "rename")) { return syscall2(.rename, @intFromPtr(old), @intFromPtr(new)); } else if (@hasField(SYS, "renameat")) { - return syscall4(.renameat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new)); + return syscall4(.renameat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new)); } else { - return syscall5(.renameat2, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new), 0); + return syscall5(.renameat2, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, 
@bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new), 0); } } @@ -700,17 +700,17 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const if (@hasField(SYS, "renameat")) { return syscall4( .renameat, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), ); } else { return syscall5( .renameat2, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), 0, ); @@ -720,9 +720,9 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const pub fn renameat2(oldfd: i32, oldpath: [*:0]const u8, newfd: i32, newpath: [*:0]const u8, flags: u32) usize { return syscall5( .renameat2, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), flags, ); @@ -734,7 +734,7 @@ pub fn open(path: [*:0]const u8, flags: u32, perm: mode_t) usize { } else { return syscall4( .openat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), flags, perm, @@ -748,7 +748,7 @@ pub fn create(path: [*:0]const u8, perm: mode_t) usize { pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, mode: mode_t) usize { // dirfd could be negative, for example AT.FDCWD is -100 - return syscall4(.openat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags, mode); + return syscall4(.openat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mode); } /// See also `clone` (from the arch-specific include) @@ -762,11 +762,11 @@ pub fn clone2(flags: u32, child_stack_ptr: usize) usize { } pub fn close(fd: i32) usize { - return syscall1(.close, @bitCast(usize, 
@as(isize, fd))); + return syscall1(.close, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fchmod(fd: i32, mode: mode_t) usize { - return syscall2(.fchmod, @bitCast(usize, @as(isize, fd)), mode); + return syscall2(.fchmod, @as(usize, @bitCast(@as(isize, fd))), mode); } pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { @@ -775,7 +775,7 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { } else { return syscall4( .fchmodat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0, @@ -785,14 +785,14 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize { pub fn fchown(fd: i32, owner: uid_t, group: gid_t) usize { if (@hasField(SYS, "fchown32")) { - return syscall3(.fchown32, @bitCast(usize, @as(isize, fd)), owner, group); + return syscall3(.fchown32, @as(usize, @bitCast(@as(isize, fd))), owner, group); } else { - return syscall3(.fchown, @bitCast(usize, @as(isize, fd)), owner, group); + return syscall3(.fchown, @as(usize, @bitCast(@as(isize, fd))), owner, group); } } pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t, flags: u32) usize { - return syscall4(.fchmodat, @bitCast(usize, @as(isize, fd)), @intFromPtr(path), mode, flags); + return syscall4(.fchmodat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(path), mode, flags); } /// Can only be called on 32 bit systems. For 64 bit see `lseek`. @@ -801,9 +801,9 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize { // endianness. return syscall5( ._llseek, - @bitCast(usize, @as(isize, fd)), - @truncate(usize, offset >> 32), - @truncate(usize, offset), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @truncate(offset >> 32)), + @as(usize, @truncate(offset)), @intFromPtr(result), whence, ); @@ -811,16 +811,16 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize { /// Can only be called on 64 bit systems. For 32 bit see `llseek`. 
pub fn lseek(fd: i32, offset: i64, whence: usize) usize { - return syscall3(.lseek, @bitCast(usize, @as(isize, fd)), @bitCast(usize, offset), whence); + return syscall3(.lseek, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(offset)), whence); } pub fn exit(status: i32) noreturn { - _ = syscall1(.exit, @bitCast(usize, @as(isize, status))); + _ = syscall1(.exit, @as(usize, @bitCast(@as(isize, status)))); unreachable; } pub fn exit_group(status: i32) noreturn { - _ = syscall1(.exit_group, @bitCast(usize, @as(isize, status))); + _ = syscall1(.exit_group, @as(usize, @bitCast(@as(isize, status)))); unreachable; } @@ -886,15 +886,15 @@ pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize { } pub fn kill(pid: pid_t, sig: i32) usize { - return syscall2(.kill, @bitCast(usize, @as(isize, pid)), @bitCast(usize, @as(isize, sig))); + return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn tkill(tid: pid_t, sig: i32) usize { - return syscall2(.tkill, @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig))); + return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize { - return syscall3(.tgkill, @bitCast(usize, @as(isize, tgid)), @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig))); + return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig)))); } pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { @@ -903,16 +903,16 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { .link, @intFromPtr(oldpath), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } else { return syscall5( .linkat, - @bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(oldpath), - 
@bitCast(usize, @as(isize, AT.FDCWD)), + @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } } @@ -920,11 +920,11 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize { pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: i32) usize { return syscall5( .linkat, - @bitCast(usize, @as(isize, oldfd)), + @as(usize, @bitCast(@as(isize, oldfd))), @intFromPtr(oldpath), - @bitCast(usize, @as(isize, newfd)), + @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath), - @bitCast(usize, @as(isize, flags)), + @as(usize, @bitCast(@as(isize, flags))), ); } @@ -932,22 +932,22 @@ pub fn unlink(path: [*:0]const u8) usize { if (@hasField(SYS, "unlink")) { return syscall1(.unlink, @intFromPtr(path)); } else { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), 0); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), 0); } } pub fn unlinkat(dirfd: i32, path: [*:0]const u8, flags: u32) usize { - return syscall3(.unlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags); + return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags); } pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize { - return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @intFromPtr(status), flags, 0); + return syscall4(.wait4, @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, 0); } pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize { return syscall4( .wait4, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, @intFromPtr(usage), @@ -955,18 +955,18 @@ pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize { } pub fn waitid(id_type: P, id: i32, infop: *siginfo_t, flags: u32) usize { - return 
syscall5(.waitid, @intFromEnum(id_type), @bitCast(usize, @as(isize, id)), @intFromPtr(infop), flags, 0); + return syscall5(.waitid, @intFromEnum(id_type), @as(usize, @bitCast(@as(isize, id))), @intFromPtr(infop), flags, 0); } pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) usize { - return syscall3(.fcntl, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, cmd)), arg); + return syscall3(.fcntl, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, cmd))), arg); } pub fn flock(fd: fd_t, operation: i32) usize { - return syscall2(.flock, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, operation))); + return syscall2(.flock, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, operation)))); } -var vdso_clock_gettime = @ptrCast(?*const anyopaque, &init_vdso_clock_gettime); +var vdso_clock_gettime = @as(?*const anyopaque, @ptrCast(&init_vdso_clock_gettime)); // We must follow the C calling convention when we call into the VDSO const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) usize; @@ -975,36 +975,36 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) usize { if (@hasDecl(VDSO, "CGT_SYM")) { const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered); if (ptr) |fn_ptr| { - const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr); + const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr)); const rc = f(clk_id, tp); switch (rc) { - 0, @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))) => return rc, + 0, @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))) => return rc, else => {}, } } } - return syscall2(.clock_gettime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_gettime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize { - const ptr = @ptrFromInt(?*const anyopaque, vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)); + const ptr = @as(?*const anyopaque, 
@ptrFromInt(vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM))); // Note that we may not have a VDSO at all, update the stub address anyway // so that clock_gettime will fall back on the good old (and slow) syscall @atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic); // Call into the VDSO if available if (ptr) |fn_ptr| { - const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr); + const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr)); return f(clk, ts); } - return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS))); + return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS)))); } pub fn clock_getres(clk_id: i32, tp: *timespec) usize { - return syscall2(.clock_getres, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_getres, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } pub fn clock_settime(clk_id: i32, tp: *const timespec) usize { - return syscall2(.clock_settime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp)); + return syscall2(.clock_settime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } pub fn gettimeofday(tv: *timeval, tz: *timezone) usize { @@ -1053,33 +1053,33 @@ pub fn setregid(rgid: gid_t, egid: gid_t) usize { pub fn getuid() uid_t { if (@hasField(SYS, "getuid32")) { - return @intCast(uid_t, syscall0(.getuid32)); + return @as(uid_t, @intCast(syscall0(.getuid32))); } else { - return @intCast(uid_t, syscall0(.getuid)); + return @as(uid_t, @intCast(syscall0(.getuid))); } } pub fn getgid() gid_t { if (@hasField(SYS, "getgid32")) { - return @intCast(gid_t, syscall0(.getgid32)); + return @as(gid_t, @intCast(syscall0(.getgid32))); } else { - return @intCast(gid_t, syscall0(.getgid)); + return @as(gid_t, @intCast(syscall0(.getgid))); } } pub fn geteuid() uid_t { if (@hasField(SYS, "geteuid32")) { - return @intCast(uid_t, syscall0(.geteuid32)); + return @as(uid_t, @intCast(syscall0(.geteuid32))); } else { - return @intCast(uid_t, syscall0(.geteuid)); + return @as(uid_t, 
@intCast(syscall0(.geteuid))); } } pub fn getegid() gid_t { if (@hasField(SYS, "getegid32")) { - return @intCast(gid_t, syscall0(.getegid32)); + return @as(gid_t, @intCast(syscall0(.getegid32))); } else { - return @intCast(gid_t, syscall0(.getegid)); + return @as(gid_t, @intCast(syscall0(.getegid))); } } @@ -1154,11 +1154,11 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize { } pub fn getpid() pid_t { - return @bitCast(pid_t, @truncate(u32, syscall0(.getpid))); + return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid))))); } pub fn gettid() pid_t { - return @bitCast(pid_t, @truncate(u32, syscall0(.gettid))); + return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid))))); } pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize { @@ -1182,9 +1182,9 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact .handler = new.handler.handler, .flags = new.flags | SA.RESTORER, .mask = undefined, - .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn), + .restorer = @as(k_sigaction_funcs.restorer, @ptrCast(restorer_fn)), }; - @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask)); + @memcpy(@as([*]u8, @ptrCast(&ksa.mask))[0..mask_size], @as([*]const u8, @ptrCast(&new.mask))); } const ksa_arg = if (act != null) @intFromPtr(&ksa) else 0; @@ -1199,8 +1199,8 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact if (oact) |old| { old.handler.handler = oldksa.handler; - old.flags = @truncate(c_uint, oldksa.flags); - @memcpy(@ptrCast([*]u8, &old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask)); + old.flags = @as(c_uint, @truncate(oldksa.flags)); + @memcpy(@as([*]u8, @ptrCast(&old.mask))[0..mask_size], @as([*]const u8, @ptrCast(&oldksa.mask))); } return 0; @@ -1211,28 +1211,28 @@ const usize_bits = @typeInfo(usize).Int.bits; pub fn sigaddset(set: *sigset_t, sig: u6) void { const s = sig - 1; // shift in musl: 
s&8*sizeof *set->__bits-1 - const shift = @intCast(u5, s & (usize_bits - 1)); - const val = @intCast(u32, 1) << shift; - (set.*)[@intCast(usize, s) / usize_bits] |= val; + const shift = @as(u5, @intCast(s & (usize_bits - 1))); + const val = @as(u32, @intCast(1)) << shift; + (set.*)[@as(usize, @intCast(s)) / usize_bits] |= val; } pub fn sigismember(set: *const sigset_t, sig: u6) bool { const s = sig - 1; - return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0; + return ((set.*)[@as(usize, @intCast(s)) / usize_bits] & (@as(usize, @intCast(1)) << (s & (usize_bits - 1)))) != 0; } pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) }); + return socketcall(SC.getsockname, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) }); } - return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len)); + return syscall3(.getsockname, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len)); } pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) }); + return socketcall(SC.getpeername, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) }); } - return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len)); + return syscall3(.getpeername, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len)); } pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { @@ -1244,20 +1244,20 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: 
[*]const u8, optlen: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen) }); + return socketcall(SC.setsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)) }); } - return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen)); + return syscall5(.setsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen))); } pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen) }); + return socketcall(SC.getsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen) }); } - return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen)); + return syscall5(.getsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen)); } pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const msg_usize = @intFromPtr(msg); if (native_arch == .x86) { return socketcall(SC.sendmsg, &[3]usize{ fd_usize, msg_usize, flags }); @@ -1275,13 +1275,13 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize var next_unsent: usize = 0; for (msgvec[0..kvlen], 0..) 
|*msg, i| { var size: i32 = 0; - const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned + const msg_iovlen = @as(usize, @intCast(msg.msg_hdr.msg_iovlen)); // kernel side this is treated as unsigned for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| { - if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @intCast(i32, iov.iov_len))[1] != 0) { + if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @as(i32, @intCast(iov.iov_len)))[1] != 0) { // batch-send all messages up to the current message if (next_unsent < i) { const batch_size = i - next_unsent; - const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); + const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); if (getErrno(r) != 0) return next_unsent; if (r < batch_size) return next_unsent + r; } @@ -1289,7 +1289,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize const r = sendmsg(fd, &msg.msg_hdr, flags); if (getErrno(r) != 0) return r; // Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe. - msg.msg_len = @intCast(u32, r); + msg.msg_len = @as(u32, @intCast(r)); next_unsent = i + 1; break; } @@ -1297,17 +1297,17 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize } if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. 
to trigger MSG.EOR) const batch_size = kvlen - next_unsent; - const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); + const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags); if (getErrno(r) != 0) return r; return next_unsent + r; } return kvlen; } - return syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(msgvec), vlen, flags); + return syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(msgvec), vlen, flags); } pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const addr_usize = @intFromPtr(addr); if (native_arch == .x86) { return socketcall(SC.connect, &[3]usize{ fd_usize, addr_usize, len }); @@ -1317,7 +1317,7 @@ pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize { } pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const msg_usize = @intFromPtr(msg); if (native_arch == .x86) { return socketcall(SC.recvmsg, &[3]usize{ fd_usize, msg_usize, flags }); @@ -1334,7 +1334,7 @@ pub fn recvfrom( noalias addr: ?*sockaddr, noalias alen: ?*socklen_t, ) usize { - const fd_usize = @bitCast(usize, @as(isize, fd)); + const fd_usize = @as(usize, @bitCast(@as(isize, fd))); const buf_usize = @intFromPtr(buf); const addr_usize = @intFromPtr(addr); const alen_usize = @intFromPtr(alen); @@ -1347,46 +1347,46 @@ pub fn recvfrom( pub fn shutdown(fd: i32, how: i32) usize { if (native_arch == .x86) { - return socketcall(SC.shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) }); + return socketcall(SC.shutdown, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))) }); } - return syscall2(.shutdown, 
@bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how))); + return syscall2(.shutdown, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how)))); } pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len) }); + return socketcall(SC.bind, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)) }); } - return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len)); + return syscall3(.bind, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len))); } pub fn listen(fd: i32, backlog: u32) usize { if (native_arch == .x86) { - return socketcall(SC.listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog }); + return socketcall(SC.listen, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), backlog }); } - return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog); + return syscall2(.listen, @as(usize, @bitCast(@as(isize, fd))), backlog); } pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen) }); + return socketcall(SC.sendto, &[6]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen)) }); } - return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen)); + return syscall6(.sendto, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen))); } pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize { if (@hasField(SYS, "sendfile64")) { return syscall4( 
.sendfile64, - @bitCast(usize, @as(isize, outfd)), - @bitCast(usize, @as(isize, infd)), + @as(usize, @bitCast(@as(isize, outfd))), + @as(usize, @bitCast(@as(isize, infd))), @intFromPtr(offset), count, ); } else { return syscall4( .sendfile, - @bitCast(usize, @as(isize, outfd)), - @bitCast(usize, @as(isize, infd)), + @as(usize, @bitCast(@as(isize, outfd))), + @as(usize, @bitCast(@as(isize, infd))), @intFromPtr(offset), count, ); @@ -1395,9 +1395,9 @@ pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize { pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize { if (native_arch == .x86) { - return socketcall(SC.socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd) }); + return socketcall(SC.socketpair, &[4]usize{ @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd) }); } - return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd)); + return syscall4(.socketpair, @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd)); } pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize { @@ -1409,16 +1409,16 @@ pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize { if (native_arch == .x86) { - return socketcall(SC.accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags }); + return socketcall(SC.accept4, &[4]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags }); } - return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags); + return syscall4(.accept4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), 
@intFromPtr(len), flags); } pub fn fstat(fd: i32, stat_buf: *Stat) usize { if (@hasField(SYS, "fstat64")) { - return syscall2(.fstat64, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf)); + return syscall2(.fstat64, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); } else { - return syscall2(.fstat, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf)); + return syscall2(.fstat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); } } @@ -1440,9 +1440,9 @@ pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize { pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize { if (@hasField(SYS, "fstatat64")) { - return syscall4(.fstatat64, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags); + return syscall4(.fstatat64, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags); } else { - return syscall4(.fstatat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags); + return syscall4(.fstatat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags); } } @@ -1450,14 +1450,14 @@ pub fn statx(dirfd: i32, path: [*]const u8, flags: u32, mask: u32, statx_buf: *S if (@hasField(SYS, "statx")) { return syscall5( .statx, - @bitCast(usize, @as(isize, dirfd)), + @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mask, @intFromPtr(statx_buf), ); } - return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS))); + return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS)))); } pub fn listxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize { @@ -1513,9 +1513,9 @@ pub fn sched_yield() usize { } pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize { - const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set)); - if (@bitCast(isize, rc) < 0) return rc; - if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0); + const 
rc = syscall3(.sched_getaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set)); + if (@as(isize, @bitCast(rc)) < 0) return rc; + if (rc < size) @memset(@as([*]u8, @ptrCast(set))[rc..size], 0); return 0; } @@ -1526,18 +1526,18 @@ pub fn getcpu(cpu: *u32, node: *u32) usize { pub fn sched_getcpu() usize { var cpu: u32 = undefined; const rc = syscall3(.getcpu, @intFromPtr(&cpu), 0, 0); - if (@bitCast(isize, rc) < 0) return rc; - return @intCast(usize, cpu); + if (@as(isize, @bitCast(rc)) < 0) return rc; + return @as(usize, @intCast(cpu)); } /// libc has no wrapper for this syscall pub fn mbind(addr: ?*anyopaque, len: u32, mode: i32, nodemask: *const u32, maxnode: u32, flags: u32) usize { - return syscall6(.mbind, @intFromPtr(addr), len, @bitCast(usize, @as(isize, mode)), @intFromPtr(nodemask), maxnode, flags); + return syscall6(.mbind, @intFromPtr(addr), len, @as(usize, @bitCast(@as(isize, mode))), @intFromPtr(nodemask), maxnode, flags); } pub fn sched_setaffinity(pid: pid_t, size: usize, set: *const cpu_set_t) usize { - const rc = syscall3(.sched_setaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set)); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall3(.sched_setaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set)); + if (@as(isize, @bitCast(rc)) < 0) return rc; return 0; } @@ -1550,7 +1550,7 @@ pub fn epoll_create1(flags: usize) usize { } pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: ?*epoll_event) usize { - return syscall4(.epoll_ctl, @bitCast(usize, @as(isize, epoll_fd)), @intCast(usize, op), @bitCast(usize, @as(isize, fd)), @intFromPtr(ev)); + return syscall4(.epoll_ctl, @as(usize, @bitCast(@as(isize, epoll_fd))), @as(usize, @intCast(op)), @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(ev)); } pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize { @@ -1560,10 +1560,10 @@ pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout 
pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32, sigmask: ?*const sigset_t) usize { return syscall6( .epoll_pwait, - @bitCast(usize, @as(isize, epoll_fd)), + @as(usize, @bitCast(@as(isize, epoll_fd))), @intFromPtr(events), - @intCast(usize, maxevents), - @bitCast(usize, @as(isize, timeout)), + @as(usize, @intCast(maxevents)), + @as(usize, @bitCast(@as(isize, timeout))), @intFromPtr(sigmask), @sizeOf(sigset_t), ); @@ -1574,7 +1574,7 @@ pub fn eventfd(count: u32, flags: u32) usize { } pub fn timerfd_create(clockid: i32, flags: u32) usize { - return syscall2(.timerfd_create, @bitCast(usize, @as(isize, clockid)), flags); + return syscall2(.timerfd_create, @as(usize, @bitCast(@as(isize, clockid))), flags); } pub const itimerspec = extern struct { @@ -1583,11 +1583,11 @@ pub const itimerspec = extern struct { }; pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize { - return syscall2(.timerfd_gettime, @bitCast(usize, @as(isize, fd)), @intFromPtr(curr_value)); + return syscall2(.timerfd_gettime, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(curr_value)); } pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall4(.timerfd_settime, @bitCast(usize, @as(isize, fd)), flags, @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall4(.timerfd_settime, @as(usize, @bitCast(@as(isize, fd))), flags, @intFromPtr(new_value), @intFromPtr(old_value)); } pub const sigevent = extern struct { @@ -1609,8 +1609,8 @@ pub const timer_t = ?*anyopaque; pub fn timer_create(clockid: i32, sevp: *sigevent, timerid: *timer_t) usize { var t: timer_t = undefined; - const rc = syscall3(.timer_create, @bitCast(usize, @as(isize, clockid)), @intFromPtr(sevp), @intFromPtr(&t)); - if (@bitCast(isize, rc) < 0) return rc; + const rc = syscall3(.timer_create, @as(usize, @bitCast(@as(isize, clockid))), @intFromPtr(sevp), @intFromPtr(&t)); + if (@as(isize, @bitCast(rc)) < 0) return rc; 
timerid.* = t; return rc; } @@ -1624,7 +1624,7 @@ pub fn timer_gettime(timerid: timer_t, curr_value: *itimerspec) usize { } pub fn timer_settime(timerid: timer_t, flags: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall4(.timer_settime, @intFromPtr(timerid), @bitCast(usize, @as(isize, flags)), @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall4(.timer_settime, @intFromPtr(timerid), @as(usize, @bitCast(@as(isize, flags))), @intFromPtr(new_value), @intFromPtr(old_value)); } // Flags for the 'setitimer' system call @@ -1635,11 +1635,11 @@ pub const ITIMER = enum(i32) { }; pub fn getitimer(which: i32, curr_value: *itimerspec) usize { - return syscall2(.getitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(curr_value)); + return syscall2(.getitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(curr_value)); } pub fn setitimer(which: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { - return syscall3(.setitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(new_value), @intFromPtr(old_value)); + return syscall3(.setitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(new_value), @intFromPtr(old_value)); } pub fn unshare(flags: usize) usize { @@ -1667,11 +1667,11 @@ pub fn io_uring_setup(entries: u32, p: *io_uring_params) usize { } pub fn io_uring_enter(fd: i32, to_submit: u32, min_complete: u32, flags: u32, sig: ?*sigset_t) usize { - return syscall6(.io_uring_enter, @bitCast(usize, @as(isize, fd)), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8); + return syscall6(.io_uring_enter, @as(usize, @bitCast(@as(isize, fd))), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8); } pub fn io_uring_register(fd: i32, opcode: IORING_REGISTER, arg: ?*const anyopaque, nr_args: u32) usize { - return syscall4(.io_uring_register, @bitCast(usize, @as(isize, fd)), @intFromEnum(opcode), @intFromPtr(arg), nr_args); + return syscall4(.io_uring_register, @as(usize, @bitCast(@as(isize, 
fd))), @intFromEnum(opcode), @intFromPtr(arg), nr_args); } pub fn memfd_create(name: [*:0]const u8, flags: u32) usize { @@ -1679,43 +1679,43 @@ pub fn memfd_create(name: [*:0]const u8, flags: u32) usize { } pub fn getrusage(who: i32, usage: *rusage) usize { - return syscall2(.getrusage, @bitCast(usize, @as(isize, who)), @intFromPtr(usage)); + return syscall2(.getrusage, @as(usize, @bitCast(@as(isize, who))), @intFromPtr(usage)); } pub fn tcgetattr(fd: fd_t, termios_p: *termios) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CGETS, @intFromPtr(termios_p)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CGETS, @intFromPtr(termios_p)); } pub fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p)); } pub fn tcgetpgrp(fd: fd_t, pgrp: *pid_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCGPGRP, @intFromPtr(pgrp)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCGPGRP, @intFromPtr(pgrp)); } pub fn tcsetpgrp(fd: fd_t, pgrp: *const pid_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCSPGRP, @intFromPtr(pgrp)); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCSPGRP, @intFromPtr(pgrp)); } pub fn tcdrain(fd: fd_t) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSBRK, 1); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSBRK, 1); } pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize { - return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg); + return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), request, arg); } pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize { - return 
syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @intFromPtr(mask), NSIG / 8, flags); + return syscall4(.signalfd4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(mask), NSIG / 8, flags); } pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize { return syscall6( .copy_file_range, - @bitCast(usize, @as(isize, fd_in)), + @as(usize, @bitCast(@as(isize, fd_in))), @intFromPtr(off_in), - @bitCast(usize, @as(isize, fd_out)), + @as(usize, @bitCast(@as(isize, fd_out))), @intFromPtr(off_out), len, flags, @@ -1731,19 +1731,19 @@ pub fn sync() void { } pub fn syncfs(fd: fd_t) usize { - return syscall1(.syncfs, @bitCast(usize, @as(isize, fd))); + return syscall1(.syncfs, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fsync(fd: fd_t) usize { - return syscall1(.fsync, @bitCast(usize, @as(isize, fd))); + return syscall1(.fsync, @as(usize, @bitCast(@as(isize, fd)))); } pub fn fdatasync(fd: fd_t) usize { - return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd))); + return syscall1(.fdatasync, @as(usize, @bitCast(@as(isize, fd)))); } pub fn prctl(option: i32, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize { - return syscall5(.prctl, @bitCast(usize, @as(isize, option)), arg2, arg3, arg4, arg5); + return syscall5(.prctl, @as(usize, @bitCast(@as(isize, option))), arg2, arg3, arg4, arg5); } pub fn getrlimit(resource: rlimit_resource, rlim: *rlimit) usize { @@ -1759,8 +1759,8 @@ pub fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) usize { pub fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: ?*const rlimit, old_limit: ?*rlimit) usize { return syscall4( .prlimit64, - @bitCast(usize, @as(isize, pid)), - @bitCast(usize, @as(isize, @intFromEnum(resource))), + @as(usize, @bitCast(@as(isize, pid))), + @as(usize, @bitCast(@as(isize, @intFromEnum(resource)))), @intFromPtr(new_limit), @intFromPtr(old_limit), ); @@ -1775,14 +1775,14 @@ pub fn madvise(address: [*]u8, len: usize, advice: u32) usize 
{ } pub fn pidfd_open(pid: pid_t, flags: u32) usize { - return syscall2(.pidfd_open, @bitCast(usize, @as(isize, pid)), flags); + return syscall2(.pidfd_open, @as(usize, @bitCast(@as(isize, pid))), flags); } pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize { return syscall3( .pidfd_getfd, - @bitCast(usize, @as(isize, pidfd)), - @bitCast(usize, @as(isize, targetfd)), + @as(usize, @bitCast(@as(isize, pidfd))), + @as(usize, @bitCast(@as(isize, targetfd))), flags, ); } @@ -1790,8 +1790,8 @@ pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize { pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) usize { return syscall4( .pidfd_send_signal, - @bitCast(usize, @as(isize, pidfd)), - @bitCast(usize, @as(isize, sig)), + @as(usize, @bitCast(@as(isize, pidfd))), + @as(usize, @bitCast(@as(isize, sig))), @intFromPtr(info), flags, ); @@ -1800,7 +1800,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize { return syscall6( .process_vm_readv, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(local.ptr), local.len, @intFromPtr(remote.ptr), @@ -1812,7 +1812,7 @@ pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, pub fn process_vm_writev(pid: pid_t, local: []const iovec_const, remote: []const iovec_const, flags: usize) usize { return syscall6( .process_vm_writev, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(local.ptr), local.len, @intFromPtr(remote.ptr), @@ -1830,7 +1830,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return syscall7( .fadvise64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), 0, offset_halves[0], offset_halves[1], @@ -1846,7 +1846,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return 
syscall6( .fadvise64_64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), advice, offset_halves[0], offset_halves[1], @@ -1862,7 +1862,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { return syscall6( .fadvise64_64, - @bitCast(usize, @as(isize, fd)), + @as(usize, @bitCast(@as(isize, fd))), offset_halves[0], offset_halves[1], length_halves[0], @@ -1872,9 +1872,9 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize { } else { return syscall4( .fadvise64, - @bitCast(usize, @as(isize, fd)), - @bitCast(usize, offset), - @bitCast(usize, len), + @as(usize, @bitCast(@as(isize, fd))), + @as(usize, @bitCast(offset)), + @as(usize, @bitCast(len)), advice, ); } @@ -1890,9 +1890,9 @@ pub fn perf_event_open( return syscall5( .perf_event_open, @intFromPtr(attr), - @bitCast(usize, @as(isize, pid)), - @bitCast(usize, @as(isize, cpu)), - @bitCast(usize, @as(isize, group_fd)), + @as(usize, @bitCast(@as(isize, pid))), + @as(usize, @bitCast(@as(isize, cpu))), + @as(usize, @bitCast(@as(isize, group_fd))), flags, ); } @@ -1911,7 +1911,7 @@ pub fn ptrace( return syscall5( .ptrace, req, - @bitCast(usize, @as(isize, pid)), + @as(usize, @bitCast(@as(isize, pid))), addr, data, addr2, @@ -2057,7 +2057,7 @@ pub const W = struct { pub const NOWAIT = 0x1000000; pub fn EXITSTATUS(s: u32) u8 { - return @intCast(u8, (s & 0xff00) >> 8); + return @as(u8, @intCast((s & 0xff00) >> 8)); } pub fn TERMSIG(s: u32) u32 { return s & 0x7f; @@ -2069,7 +2069,7 @@ pub const W = struct { return TERMSIG(s) == 0; } pub fn IFSTOPPED(s: u32) bool { - return @truncate(u16, ((s & 0xffff) *% 0x10001) >> 8) > 0x7f00; + return @as(u16, @truncate(((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00; } pub fn IFSIGNALED(s: u32) bool { return (s & 0xffff) -% 1 < 0xff; @@ -2154,9 +2154,9 @@ pub const SIG = if (is_mips) struct { pub const SYS = 31; pub const UNUSED = SIG.SYS; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = 
@ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); } else if (is_sparc) struct { pub const BLOCK = 1; pub const UNBLOCK = 2; @@ -2198,9 +2198,9 @@ pub const SIG = if (is_mips) struct { pub const PWR = LOST; pub const IO = SIG.POLL; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); } else struct { pub const BLOCK = 0; pub const UNBLOCK = 1; @@ -2241,9 +2241,9 @@ pub const SIG = if (is_mips) struct { pub const SYS = 31; pub const UNUSED = SIG.SYS; - pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize)); - pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0); - pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1); + pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize))); + pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0)); + pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1)); }; pub const kernel_rwf = u32; @@ -3541,7 +3541,7 @@ pub const CAP = struct { } pub fn TO_MASK(cap: u8) u32 { - return @as(u32, 1) << @intCast(u5, cap & 31); + return @as(u32, 1) << @as(u5, @intCast(cap & 31)); } pub fn TO_INDEX(cap: u8) u8 { @@ -3598,7 +3598,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8)); fn cpu_mask(s: usize) cpu_count_t { var x = s & (CPU_SETSIZE * 8); - return @intCast(cpu_count_t, 1) << @intCast(u4, x); + return @as(cpu_count_t, @intCast(1)) << @as(u4, @intCast(x)); } pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t { @@ -3999,7 
+3999,7 @@ pub const io_uring_cqe = extern struct { pub fn err(self: io_uring_cqe) E { if (self.res > -4096 and self.res < 0) { - return @enumFromInt(E, -self.res); + return @as(E, @enumFromInt(-self.res)); } return .SUCCESS; } diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 87b92587f959..751e5dc95ae1 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -643,7 +643,7 @@ pub const Insn = packed struct { .dst = @intFromEnum(dst), .src = @intFromEnum(src), .off = 0, - .imm = @intCast(i32, @truncate(u32, imm)), + .imm = @as(i32, @intCast(@as(u32, @truncate(imm)))), }; } @@ -653,7 +653,7 @@ pub const Insn = packed struct { .dst = 0, .src = 0, .off = 0, - .imm = @intCast(i32, @truncate(u32, imm >> 32)), + .imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))), }; } @@ -666,11 +666,11 @@ pub const Insn = packed struct { } pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn { - return ld_imm_impl1(dst, @enumFromInt(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd)); + return ld_imm_impl1(dst, @as(Reg, @enumFromInt(PSEUDO_MAP_FD)), @as(u64, @intCast(map_fd))); } pub fn ld_map_fd2(map_fd: fd_t) Insn { - return ld_imm_impl2(@intCast(u64, map_fd)); + return ld_imm_impl2(@as(u64, @intCast(map_fd))); } pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn { @@ -786,17 +786,17 @@ test "opcodes" { // TODO: byteswap instructions try expect_opcode(0xd4, Insn.le(.half_word, .r1)); - try expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm); + try expectEqual(@as(i32, @intCast(16)), Insn.le(.half_word, .r1).imm); try expect_opcode(0xd4, Insn.le(.word, .r1)); - try expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm); + try expectEqual(@as(i32, @intCast(32)), Insn.le(.word, .r1).imm); try expect_opcode(0xd4, Insn.le(.double_word, .r1)); - try expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm); + try expectEqual(@as(i32, @intCast(64)), Insn.le(.double_word, .r1).imm); try expect_opcode(0xdc, Insn.be(.half_word, .r1)); - 
try expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm); + try expectEqual(@as(i32, @intCast(16)), Insn.be(.half_word, .r1).imm); try expect_opcode(0xdc, Insn.be(.word, .r1)); - try expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm); + try expectEqual(@as(i32, @intCast(32)), Insn.be(.word, .r1).imm); try expect_opcode(0xdc, Insn.be(.double_word, .r1)); - try expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm); + try expectEqual(@as(i32, @intCast(64)), Insn.be(.double_word, .r1).imm); // memory instructions try expect_opcode(0x18, Insn.ld_dw1(.r1, 0)); @@ -804,7 +804,7 @@ test "opcodes" { // loading a map fd try expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0)); - try expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src); + try expectEqual(@as(u4, @intCast(PSEUDO_MAP_FD)), Insn.ld_map_fd1(.r1, 0).src); try expect_opcode(0x00, Insn.ld_map_fd2(0)); try expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0)); @@ -1518,7 +1518,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries const rc = linux.bpf(.map_create, &attr, @sizeOf(MapCreateAttr)); switch (errno(rc)) { - .SUCCESS => return @intCast(fd_t, rc), + .SUCCESS => return @as(fd_t, @intCast(rc)), .INVAL => return error.MapTypeOrAttrInvalid, .NOMEM => return error.SystemResources, .PERM => return error.AccessDenied, @@ -1668,20 +1668,20 @@ pub fn prog_load( attr.prog_load.prog_type = @intFromEnum(prog_type); attr.prog_load.insns = @intFromPtr(insns.ptr); - attr.prog_load.insn_cnt = @intCast(u32, insns.len); + attr.prog_load.insn_cnt = @as(u32, @intCast(insns.len)); attr.prog_load.license = @intFromPtr(license.ptr); attr.prog_load.kern_version = kern_version; attr.prog_load.prog_flags = flags; if (log) |l| { attr.prog_load.log_buf = @intFromPtr(l.buf.ptr); - attr.prog_load.log_size = @intCast(u32, l.buf.len); + attr.prog_load.log_size = @as(u32, @intCast(l.buf.len)); attr.prog_load.log_level = l.level; } const rc = linux.bpf(.prog_load, &attr, 
@sizeOf(ProgLoadAttr)); return switch (errno(rc)) { - .SUCCESS => @intCast(fd_t, rc), + .SUCCESS => @as(fd_t, @intCast(rc)), .ACCES => error.UnsafeProgram, .FAULT => unreachable, .INVAL => error.InvalidProgram, diff --git a/lib/std/os/linux/bpf/helpers.zig b/lib/std/os/linux/bpf/helpers.zig index b26e7eda29ae..027220088e17 100644 --- a/lib/std/os/linux/bpf/helpers.zig +++ b/lib/std/os/linux/bpf/helpers.zig @@ -11,147 +11,147 @@ const SkFullSock = @compileError("TODO missing os bits: SkFullSock"); // // Note, these function signatures were created from documentation found in // '/usr/include/linux/bpf.h' -pub const map_lookup_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, 1); -pub const map_update_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, 2); -pub const map_delete_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, 3); -pub const probe_read = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 4); -pub const ktime_get_ns = @ptrFromInt(*const fn () u64, 5); -pub const trace_printk = @ptrFromInt(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6); -pub const get_prandom_u32 = @ptrFromInt(*const fn () u32, 7); -pub const get_smp_processor_id = @ptrFromInt(*const fn () u32, 8); -pub const skb_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, 9); -pub const l3_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10); -pub const l4_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11); -pub const tail_call = @ptrFromInt(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, 12); -pub const clone_redirect = 
@ptrFromInt(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13); -pub const get_current_pid_tgid = @ptrFromInt(*const fn () u64, 14); -pub const get_current_uid_gid = @ptrFromInt(*const fn () u64, 15); -pub const get_current_comm = @ptrFromInt(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, 16); -pub const get_cgroup_classid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 17); +pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1)); +pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2)); +pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3)); +pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4)); +pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5)); +pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6)); +pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7)); +pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8)); +pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9)); +pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10)); +pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11)); +pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12)); +pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13)); +pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14)); 
+pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15)); +pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16)); +pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17)); // Note vlan_proto is big endian -pub const skb_vlan_push = @ptrFromInt(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18); -pub const skb_vlan_pop = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 19); -pub const skb_get_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20); -pub const skb_set_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21); -pub const perf_event_read = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64) u64, 22); -pub const redirect = @ptrFromInt(*const fn (ifindex: u32, flags: u64) c_long, 23); -pub const get_route_realm = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 24); -pub const perf_event_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 25); -pub const skb_load_bytes = @ptrFromInt(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, 26); -pub const get_stackid = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, 27); +pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18)); +pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19)); +pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20)); +pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21)); +pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22)); 
+pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23)); +pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24)); +pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25)); +pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26)); +pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27)); // from and to point to __be32 -pub const csum_diff = @ptrFromInt(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28); -pub const skb_get_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 29); -pub const skb_set_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 30); +pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28)); +pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29)); +pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30)); // proto is __be16 -pub const skb_change_proto = @ptrFromInt(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31); -pub const skb_change_type = @ptrFromInt(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32); -pub const skb_under_cgroup = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, 33); -pub const get_hash_recalc = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 34); -pub const get_current_task = @ptrFromInt(*const fn () u64, 35); -pub const probe_write_user = @ptrFromInt(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, 36); -pub const current_task_under_cgroup = @ptrFromInt(*const 
fn (map: *const kern.MapDef, index: u32) c_long, 37); -pub const skb_change_tail = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38); -pub const skb_pull_data = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32) c_long, 39); -pub const csum_update = @ptrFromInt(*const fn (skb: *kern.SkBuff, csum: u32) i64, 40); -pub const set_hash_invalid = @ptrFromInt(*const fn (skb: *kern.SkBuff) void, 41); -pub const get_numa_node_id = @ptrFromInt(*const fn () c_long, 42); -pub const skb_change_head = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43); -pub const xdp_adjust_head = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44); -pub const probe_read_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 45); -pub const get_socket_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 46); -pub const get_socket_uid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 47); -pub const set_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, hash: u32) c_long, 48); -pub const setsockopt = @ptrFromInt(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 49); -pub const skb_adjust_room = @ptrFromInt(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50); -pub const redirect_map = @ptrFromInt(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51); -pub const sk_redirect_map = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52); -pub const sock_map_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 53); -pub const xdp_adjust_meta = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54); -pub const perf_event_read_value = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55); -pub const 
perf_prog_read_value = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56); -pub const getsockopt = @ptrFromInt(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 57); -pub const override_return = @ptrFromInt(*const fn (regs: *PtRegs, rc: u64) c_long, 58); -pub const sock_ops_cb_flags_set = @ptrFromInt(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59); -pub const msg_redirect_map = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60); -pub const msg_apply_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61); -pub const msg_cork_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62); -pub const msg_pull_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63); -pub const bind = @ptrFromInt(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64); -pub const xdp_adjust_tail = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65); -pub const skb_get_xfrm_state = @ptrFromInt(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66); -pub const get_stack = @ptrFromInt(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 67); -pub const skb_load_bytes_relative = @ptrFromInt(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, 68); -pub const fib_lookup = @ptrFromInt(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69); -pub const sock_hash_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 70); -pub const msg_redirect_hash = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 71); -pub const 
sk_redirect_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 72); -pub const lwt_push_encap = @ptrFromInt(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, 73); -pub const lwt_seg6_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, 74); -pub const lwt_seg6_adjust_srh = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75); -pub const lwt_seg6_action = @ptrFromInt(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, 76); -pub const rc_repeat = @ptrFromInt(*const fn (ctx: ?*anyopaque) c_long, 77); -pub const rc_keydown = @ptrFromInt(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, 78); -pub const skb_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff) u64, 79); -pub const get_current_cgroup_id = @ptrFromInt(*const fn () u64, 80); -pub const get_local_storage = @ptrFromInt(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, 81); -pub const sk_select_reuseport = @ptrFromInt(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 82); -pub const skb_ancestor_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83); -pub const sk_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84); -pub const sk_lookup_udp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85); -pub const sk_release = @ptrFromInt(*const fn (sock: *kern.Sock) c_long, 86); -pub const map_push_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, 87); -pub const map_pop_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 88); -pub const map_peek_elem = 
@ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 89); -pub const msg_push_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90); -pub const msg_pop_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91); -pub const rc_pointer_rel = @ptrFromInt(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, 92); -pub const spin_lock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 93); -pub const spin_unlock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 94); -pub const sk_fullsock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*SkFullSock, 95); -pub const tcp_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.TcpSock, 96); -pub const skb_ecn_set_ce = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 97); -pub const get_listener_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.Sock, 98); -pub const skc_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99); -pub const tcp_check_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100); -pub const sysctl_get_name = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101); -pub const sysctl_get_current_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102); -pub const sysctl_get_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103); -pub const sysctl_set_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104); -pub const strtol = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105); -pub const strtoul = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106); -pub const sk_storage_get = @ptrFromInt(*const fn 
(map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, 107); -pub const sk_storage_delete = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108); -pub const send_signal = @ptrFromInt(*const fn (sig: u32) c_long, 109); -pub const tcp_gen_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110); -pub const skb_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 111); -pub const probe_read_user = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 112); -pub const probe_read_kernel = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 113); -pub const probe_read_user_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 114); -pub const probe_read_kernel_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 115); -pub const tcp_send_ack = @ptrFromInt(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, 116); -pub const send_signal_thread = @ptrFromInt(*const fn (sig: u32) c_long, 117); -pub const jiffies64 = @ptrFromInt(*const fn () u64, 118); -pub const read_branch_records = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, 119); -pub const get_ns_current_pid_tgid = @ptrFromInt(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120); -pub const xdp_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 121); -pub const get_netns_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 122); -pub const get_current_ancestor_cgroup_id = @ptrFromInt(*const fn (ancestor_level: c_int) u64, 123); -pub const sk_assign = @ptrFromInt(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) 
c_long, 124); -pub const ktime_get_boot_ns = @ptrFromInt(*const fn () u64, 125); -pub const seq_printf = @ptrFromInt(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, 126); -pub const seq_write = @ptrFromInt(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127); -pub const sk_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock) u64, 128); -pub const sk_ancestor_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129); -pub const ringbuf_output = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130); -pub const ringbuf_reserve = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131); -pub const ringbuf_submit = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 132); -pub const ringbuf_discard = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 133); -pub const ringbuf_query = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, 134); -pub const csum_level = @ptrFromInt(*const fn (skb: *kern.SkBuff, level: u64) c_long, 135); -pub const skc_to_tcp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, 136); -pub const skc_to_tcp_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, 137); -pub const skc_to_tcp_timewait_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, 138); -pub const skc_to_tcp_request_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, 139); -pub const skc_to_udp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, 140); -pub const get_task_stack = @ptrFromInt(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 141); +pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31)); +pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32)); +pub const 
skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33)); +pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34)); +pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35)); +pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36)); +pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37)); +pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38)); +pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39)); +pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40)); +pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41)); +pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42)); +pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43)); +pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44)); +pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45)); +pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46)); +pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47)); +pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48)); +pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49)); +pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50)); +pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51)); +pub const 
sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52)); +pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53)); +pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54)); +pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55)); +pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56)); +pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57)); +pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58)); +pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59)); +pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60)); +pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61)); +pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62)); +pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63)); +pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64)); +pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65)); +pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66)); +pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 
@ptrFromInt(67)); +pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68)); +pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69)); +pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70)); +pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71)); +pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72)); +pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73)); +pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74)); +pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75)); +pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76)); +pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77)); +pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78)); +pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79)); +pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80)); +pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81)); +pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82)); +pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) 
u64, @ptrFromInt(83)); +pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84)); +pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85)); +pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86)); +pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87)); +pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88)); +pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89)); +pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90)); +pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91)); +pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92)); +pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93)); +pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94)); +pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95)); +pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96)); +pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97)); +pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98)); +pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99)); +pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100)); +pub const sysctl_get_name = 
@as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101)); +pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102)); +pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103)); +pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104)); +pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105)); +pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106)); +pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107)); +pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108)); +pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109)); +pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110)); +pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111)); +pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112)); +pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113)); +pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114)); +pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115)); +pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116)); 
+pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117)); +pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118)); +pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119)); +pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120)); +pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121)); +pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122)); +pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123)); +pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124)); +pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125)); +pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126)); +pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127)); +pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128)); +pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129)); +pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130)); +pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131)); +pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132)); +pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133)); +pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134)); +pub const csum_level = 
@as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135)); +pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136)); +pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137)); +pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138)); +pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139)); +pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140)); +pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141)); diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index 875138cf4f47..df8cd207739e 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -60,7 +60,7 @@ pub const IO_Uring = struct { .NOSYS => return error.SystemOutdated, else => |errno| return os.unexpectedErrno(errno), } - const fd = @intCast(os.fd_t, res); + const fd = @as(os.fd_t, @intCast(res)); assert(fd >= 0); errdefer os.close(fd); @@ -198,7 +198,7 @@ pub const IO_Uring = struct { .INTR => return error.SignalInterrupt, else => |errno| return os.unexpectedErrno(errno), } - return @intCast(u32, res); + return @as(u32, @intCast(res)); } /// Sync internal state with kernel ring state on the SQ side. 
@@ -937,8 +937,8 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_FILES, - @ptrCast(*const anyopaque, fds.ptr), - @intCast(u32, fds.len), + @as(*const anyopaque, @ptrCast(fds.ptr)), + @as(u32, @intCast(fds.len)), ); try handle_registration_result(res); } @@ -968,8 +968,8 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_FILES_UPDATE, - @ptrCast(*const anyopaque, &update), - @intCast(u32, fds.len), + @as(*const anyopaque, @ptrCast(&update)), + @as(u32, @intCast(fds.len)), ); try handle_registration_result(res); } @@ -982,7 +982,7 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_EVENTFD, - @ptrCast(*const anyopaque, &fd), + @as(*const anyopaque, @ptrCast(&fd)), 1, ); try handle_registration_result(res); @@ -997,7 +997,7 @@ pub const IO_Uring = struct { const res = linux.io_uring_register( self.fd, .REGISTER_EVENTFD_ASYNC, - @ptrCast(*const anyopaque, &fd), + @as(*const anyopaque, @ptrCast(&fd)), 1, ); try handle_registration_result(res); @@ -1022,7 +1022,7 @@ pub const IO_Uring = struct { self.fd, .REGISTER_BUFFERS, buffers.ptr, - @intCast(u32, buffers.len), + @as(u32, @intCast(buffers.len)), ); try handle_registration_result(res); } @@ -1122,20 +1122,17 @@ pub const SubmissionQueue = struct { errdefer os.munmap(mmap_sqes); assert(mmap_sqes.len == size_sqes); - const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array])); - const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0])); + const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array])); + const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0])); // We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries, // see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844. 
- assert( - p.sq_entries == - @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*, - ); + assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*); return SubmissionQueue{ - .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])), - .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])), - .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*, - .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])), - .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])), + .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])), + .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])), + .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*, + .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])), + .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])), .array = array[0..p.sq_entries], .sqes = sqes[0..p.sq_entries], .mmap = mmap, @@ -1160,17 +1157,13 @@ pub const CompletionQueue = struct { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); const mmap = sq.mmap; - const cqes = @ptrCast( - [*]linux.io_uring_cqe, - @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]), - ); - assert(p.cq_entries == - @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*); + const cqes: [*]linux.io_uring_cqe = @ptrCast(@alignCast(&mmap[p.cq_off.cqes])); + assert(p.cq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_entries]))).*); return CompletionQueue{ - .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])), - .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])), - .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*, - .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])), + .head = @ptrCast(@alignCast(&mmap[p.cq_off.head])), + .tail = @ptrCast(@alignCast(&mmap[p.cq_off.tail])), + .mask 
= @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_mask]))).*, + .overflow = @ptrCast(@alignCast(&mmap[p.cq_off.overflow])), .cqes = cqes[0..p.cq_entries], }; } @@ -1233,7 +1226,7 @@ pub fn io_uring_prep_rw( .fd = fd, .off = offset, .addr = addr, - .len = @intCast(u32, len), + .len = @as(u32, @intCast(len)), .rw_flags = 0, .user_data = 0, .buf_index = 0, @@ -1319,7 +1312,7 @@ pub fn io_uring_prep_epoll_ctl( op: u32, ev: ?*linux.epoll_event, ) void { - io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @intCast(u64, fd)); + io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @as(u64, @intCast(fd))); } pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void { @@ -1459,7 +1452,7 @@ pub fn io_uring_prep_fallocate( .fd = fd, .off = offset, .addr = len, - .len = @intCast(u32, mode), + .len = @as(u32, @intCast(mode)), .rw_flags = 0, .user_data = 0, .buf_index = 0, @@ -1514,7 +1507,7 @@ pub fn io_uring_prep_renameat( 0, @intFromPtr(new_path), ); - sqe.len = @bitCast(u32, new_dir_fd); + sqe.len = @as(u32, @bitCast(new_dir_fd)); sqe.rw_flags = flags; } @@ -1569,7 +1562,7 @@ pub fn io_uring_prep_linkat( 0, @intFromPtr(new_path), ); - sqe.len = @bitCast(u32, new_dir_fd); + sqe.len = @as(u32, @bitCast(new_dir_fd)); sqe.rw_flags = flags; } @@ -1582,8 +1575,8 @@ pub fn io_uring_prep_provide_buffers( buffer_id: usize, ) void { const ptr = @intFromPtr(buffers); - io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @intCast(i32, num), ptr, buffer_len, buffer_id); - sqe.buf_index = @intCast(u16, group_id); + io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @as(i32, @intCast(num)), ptr, buffer_len, buffer_id); + sqe.buf_index = @as(u16, @intCast(group_id)); } pub fn io_uring_prep_remove_buffers( @@ -1591,8 +1584,8 @@ pub fn io_uring_prep_remove_buffers( num: usize, group_id: usize, ) void { - io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @intCast(i32, num), 0, 0, 0); - sqe.buf_index = @intCast(u16, group_id); + io_uring_prep_rw(.REMOVE_BUFFERS, sqe, 
@as(i32, @intCast(num)), 0, 0, 0); + sqe.buf_index = @as(u16, @intCast(group_id)); } test "structs/offsets/entries" { @@ -1886,12 +1879,12 @@ test "write_fixed/read_fixed" { try testing.expectEqual(linux.io_uring_cqe{ .user_data = 0x45454545, - .res = @intCast(i32, buffers[0].iov_len), + .res = @as(i32, @intCast(buffers[0].iov_len)), .flags = 0, }, cqe_write); try testing.expectEqual(linux.io_uring_cqe{ .user_data = 0x12121212, - .res = @intCast(i32, buffers[1].iov_len), + .res = @as(i32, @intCast(buffers[1].iov_len)), .flags = 0, }, cqe_read); @@ -2145,7 +2138,7 @@ test "timeout (after a relative time)" { }, cqe); // Tests should not depend on timings: skip test if outside margin. - if (!std.math.approxEqAbs(f64, ms, @floatFromInt(f64, stopped - started), margin)) return error.SkipZigTest; + if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest; } test "timeout (after a number of completions)" { @@ -2637,7 +2630,7 @@ test "renameat" { ); try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode); try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd); - try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len)); + try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len))); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -2850,7 +2843,7 @@ test "linkat" { ); try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode); try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd); - try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len)); + try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len))); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -2898,7 +2891,7 @@ test "provide_buffers: read" { // Provide 4 buffers { - const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + const sqe = try 
ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode); try testing.expectEqual(@as(i32, buffers.len), sqe.fd); try testing.expectEqual(@as(u32, buffers[0].len), sqe.len); @@ -2939,7 +2932,7 @@ test "provide_buffers: read" { try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } // This read should fail @@ -2971,7 +2964,7 @@ test "provide_buffers: read" { const reprovided_buffer_id = 2; { - _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id); + _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -3003,7 +2996,7 @@ test "provide_buffers: read" { try testing.expectEqual(used_buffer_id, reprovided_buffer_id); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } } @@ -3030,7 +3023,7 @@ test "remove_buffers" { // Provide 4 buffers { - _ = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try 
testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try ring.copy_cqe(); @@ -3076,7 +3069,7 @@ test "remove_buffers" { try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]); + try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]); } // Final read should _not_ work @@ -3119,7 +3112,7 @@ test "provide_buffers: accept/connect/send/recv" { // Provide 4 buffers { - const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id); + const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id); try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode); try testing.expectEqual(@as(i32, buffers.len), sqe.fd); try testing.expectEqual(@as(u32, buffer_len), sqe.len); @@ -3181,7 +3174,7 @@ test "provide_buffers: accept/connect/send/recv" { try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data); - const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)]; + const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]; try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer); } @@ -3213,7 +3206,7 @@ test "provide_buffers: accept/connect/send/recv" { const reprovided_buffer_id = 2; { - _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id); + _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id); try testing.expectEqual(@as(u32, 1), try ring.submit()); const cqe = try 
ring.copy_cqe(); @@ -3259,7 +3252,7 @@ test "provide_buffers: accept/connect/send/recv" { try testing.expectEqual(used_buffer_id, reprovided_buffer_id); try testing.expectEqual(@as(i32, buffer_len), cqe.res); try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data); - const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)]; + const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]; try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer); } } diff --git a/lib/std/os/linux/ioctl.zig b/lib/std/os/linux/ioctl.zig index 96ec96c3061c..7f5d36b72db8 100644 --- a/lib/std/os/linux/ioctl.zig +++ b/lib/std/os/linux/ioctl.zig @@ -32,7 +32,7 @@ fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 { .io_type = io_type, .nr = nr, }; - return @bitCast(u32, request); + return @as(u32, @bitCast(request)); } pub fn IO(io_type: u8, nr: u8) u32 { diff --git a/lib/std/os/linux/start_pie.zig b/lib/std/os/linux/start_pie.zig index c9b1cb1e922b..cf557f9d6674 100644 --- a/lib/std/os/linux/start_pie.zig +++ b/lib/std/os/linux/start_pie.zig @@ -103,17 +103,17 @@ pub fn relocate(phdrs: []elf.Phdr) void { // Apply the relocations. 
if (rel_addr != 0) { - const rel = std.mem.bytesAsSlice(elf.Rel, @ptrFromInt([*]u8, rel_addr)[0..rel_size]); + const rel = std.mem.bytesAsSlice(elf.Rel, @as([*]u8, @ptrFromInt(rel_addr))[0..rel_size]); for (rel) |r| { if (r.r_type() != R_RELATIVE) continue; - @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr; + @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr; } } if (rela_addr != 0) { - const rela = std.mem.bytesAsSlice(elf.Rela, @ptrFromInt([*]u8, rela_addr)[0..rela_size]); + const rela = std.mem.bytesAsSlice(elf.Rela, @as([*]u8, @ptrFromInt(rela_addr))[0..rela_size]); for (rela) |r| { if (r.r_type() != R_RELATIVE) continue; - @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr + @bitCast(usize, r.r_addend); + @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr + @as(usize, @bitCast(r.r_addend)); } } } diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index e1ad36b2e555..170bde6334c9 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -50,7 +50,7 @@ test "timer" { .it_value = time_interval, }; - err = linux.getErrno(linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null)); + err = linux.getErrno(linux.timerfd_settime(@as(i32, @intCast(timer_fd)), 0, &new_time, null)); try expect(err == .SUCCESS); var event = linux.epoll_event{ @@ -58,13 +58,13 @@ test "timer" { .data = linux.epoll_data{ .ptr = 0 }, }; - err = linux.getErrno(linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL.CTL_ADD, @intCast(i32, timer_fd), &event)); + err = linux.getErrno(linux.epoll_ctl(@as(i32, @intCast(epoll_fd)), linux.EPOLL.CTL_ADD, @as(i32, @intCast(timer_fd)), &event)); try expect(err == .SUCCESS); const events_one: linux.epoll_event = undefined; var events = [_]linux.epoll_event{events_one} ** 8; - err = linux.getErrno(linux.epoll_wait(@intCast(i32, epoll_fd), &events, 8, -1)); + err = linux.getErrno(linux.epoll_wait(@as(i32, @intCast(epoll_fd)), &events, 8, -1)); try expect(err == .SUCCESS); } 
@@ -91,11 +91,11 @@ test "statx" { } try expect(stat_buf.mode == statx_buf.mode); - try expect(@bitCast(u32, stat_buf.uid) == statx_buf.uid); - try expect(@bitCast(u32, stat_buf.gid) == statx_buf.gid); - try expect(@bitCast(u64, @as(i64, stat_buf.size)) == statx_buf.size); - try expect(@bitCast(u64, @as(i64, stat_buf.blksize)) == statx_buf.blksize); - try expect(@bitCast(u64, @as(i64, stat_buf.blocks)) == statx_buf.blocks); + try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid); + try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize); + try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks); } test "user and group ids" { diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index b60a2ed38897..94fa0d1a09cf 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -205,7 +205,7 @@ fn initTLS(phdrs: []elf.Phdr) void { // the data stored in the PT_TLS segment is p_filesz and may be less // than the former tls_align_factor = phdr.p_align; - tls_data = @ptrFromInt([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz]; + tls_data = @as([*]u8, @ptrFromInt(img_base + phdr.p_vaddr))[0..phdr.p_filesz]; tls_data_alloc_size = phdr.p_memsz; } else { tls_align_factor = @alignOf(usize); @@ -263,12 +263,12 @@ fn initTLS(phdrs: []elf.Phdr) void { .dtv_offset = dtv_offset, .data_offset = data_offset, .data_size = tls_data_alloc_size, - .gdt_entry_number = @bitCast(usize, @as(isize, -1)), + .gdt_entry_number = @as(usize, @bitCast(@as(isize, -1))), }; } inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T { - return @ptrCast(*T, @alignCast(@alignOf(T), ptr)); + return @ptrCast(@alignCast(ptr)); } /// Initializes all the fields of the static TLS area and returns the computed diff --git a/lib/std/os/linux/vdso.zig b/lib/std/os/linux/vdso.zig index 
c7dc7ae59991..50e7ce1dfddc 100644 --- a/lib/std/os/linux/vdso.zig +++ b/lib/std/os/linux/vdso.zig @@ -8,7 +8,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR); if (vdso_addr == 0) return 0; - const eh = @ptrFromInt(*elf.Ehdr, vdso_addr); + const eh = @as(*elf.Ehdr, @ptrFromInt(vdso_addr)); var ph_addr: usize = vdso_addr + eh.e_phoff; var maybe_dynv: ?[*]usize = null; @@ -19,14 +19,14 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { i += 1; ph_addr += eh.e_phentsize; }) { - const this_ph = @ptrFromInt(*elf.Phdr, ph_addr); + const this_ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (this_ph.p_type) { // On WSL1 as well as older kernels, the VDSO ELF image is pre-linked in the upper half // of the memory space (e.g. p_vaddr = 0xffffffffff700000 on WSL1). // Wrapping operations are used on this line as well as subsequent calculations relative to base // (lines 47, 78) to ensure no overflow check is tripped. 
elf.PT_LOAD => base = vdso_addr +% this_ph.p_offset -% this_ph.p_vaddr, - elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, vdso_addr + this_ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(vdso_addr + this_ph.p_offset)), else => {}, } } @@ -45,11 +45,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { while (dynv[i] != 0) : (i += 2) { const p = base +% dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @ptrFromInt([*]u8, p), - elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]linux.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p), - elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p), + elf.DT_STRTAB => maybe_strings = @as([*]u8, @ptrFromInt(p)), + elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)), + elf.DT_HASH => maybe_hashtab = @as([*]linux.Elf_Symndx, @ptrFromInt(p)), + elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)), + elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)), else => {}, } } @@ -65,10 +65,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { var i: usize = 0; while (i < hashtab[1]) : (i += 1) { - if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue; - if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info & 0xf)) & OK_TYPES)) continue; + if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info >> 4)) & OK_BINDS)) continue; if (0 == syms[i].st_shndx) continue; - const sym_name = @ptrCast([*:0]u8, strings + syms[i].st_name); + const sym_name = @as([*:0]u8, @ptrCast(strings + syms[i].st_name)); if (!mem.eql(u8, name, mem.sliceTo(sym_name, 0))) continue; if (maybe_versym) |versym| { if (!checkver(maybe_verdef.?, versym[i], vername, strings)) @@ -82,15 +82,15 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { fn checkver(def_arg: 
*elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool { var def = def_arg; - const vsym = @bitCast(u32, vsym_arg) & 0x7fff; + const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff; while (true) { if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym) break; if (def.vd_next == 0) return false; - def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next); + def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next)); } - const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux); - const vda_name = @ptrCast([*:0]u8, strings + aux.vda_name); + const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux)); + const vda_name = @as([*:0]u8, @ptrCast(strings + aux.vda_name)); return mem.eql(u8, vername, mem.sliceTo(vda_name, 0)); } diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig index b628bc2afced..3e1137c7ce21 100644 --- a/lib/std/os/plan9.zig +++ b/lib/std/os/plan9.zig @@ -8,9 +8,9 @@ pub const syscall_bits = switch (builtin.cpu.arch) { pub const E = @import("plan9/errno.zig").E; /// Get the errno from a syscall return value, or 0 for no error. 
pub fn getErrno(r: usize) E { - const signed_r = @bitCast(isize, r); + const signed_r = @as(isize, @bitCast(r)); const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0; - return @enumFromInt(E, int); + return @as(E, @enumFromInt(int)); } pub const SIG = struct { /// hangup diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index 888b2f5c1cf3..d5451f64ac61 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -488,7 +488,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void { const reloc_addr = info.dlpi_addr + phdr.p_vaddr; // Find the ELF header - const elf_header = @ptrFromInt(*elf.Ehdr, reloc_addr - phdr.p_offset); + const elf_header = @as(*elf.Ehdr, @ptrFromInt(reloc_addr - phdr.p_offset)); // Validate the magic if (!mem.eql(u8, elf_header.e_ident[0..4], elf.MAGIC)) return error.BadElfMagic; // Consistency check @@ -751,7 +751,7 @@ test "getrlimit and setrlimit" { } inline for (std.meta.fields(os.rlimit_resource)) |field| { - const resource = @enumFromInt(os.rlimit_resource, field.value); + const resource = @as(os.rlimit_resource, @enumFromInt(field.value)); const limit = try os.getrlimit(resource); // On 32 bit MIPS musl includes a fix which changes limits greater than -1UL/2 to RLIM_INFINITY. 
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig index f51caaa86fda..7c6eb08a93c1 100644 --- a/lib/std/os/uefi.zig +++ b/lib/std/os/uefi.zig @@ -143,7 +143,7 @@ pub const FileHandle = *opaque {}; test "GUID formatting" { var bytes = [_]u8{ 137, 60, 203, 50, 128, 128, 124, 66, 186, 19, 80, 73, 135, 59, 194, 135 }; - var guid = @bitCast(Guid, bytes); + var guid = @as(Guid, @bitCast(bytes)); var str = try std.fmt.allocPrint(std.testing.allocator, "{}", .{guid}); defer std.testing.allocator.free(str); diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index c24d9416f136..3f64a2f3f64b 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -9,7 +9,7 @@ const Allocator = mem.Allocator; const UefiPoolAllocator = struct { fn getHeader(ptr: [*]u8) *[*]align(8) u8 { - return @ptrFromInt(*[*]align(8) u8, @intFromPtr(ptr) - @sizeOf(usize)); + return @as(*[*]align(8) u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); } fn alloc( @@ -22,7 +22,7 @@ const UefiPoolAllocator = struct { assert(len > 0); - const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); @@ -135,5 +135,5 @@ fn uefi_free( ) void { _ = log2_old_ptr_align; _ = ret_addr; - _ = uefi.system_table.boot_services.?.freePool(@alignCast(8, buf.ptr)); + _ = uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr)); } diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig index c64084e6ed8e..a083959521d4 100644 --- a/lib/std/os/uefi/protocols/device_path_protocol.zig +++ b/lib/std/os/uefi/protocols/device_path_protocol.zig @@ -23,10 +23,10 @@ pub const DevicePathProtocol = extern struct { /// Returns the next DevicePathProtocol node in the sequence, if any. 
pub fn next(self: *DevicePathProtocol) ?*DevicePathProtocol { - if (self.type == .End and @enumFromInt(EndDevicePath.Subtype, self.subtype) == .EndEntire) + if (self.type == .End and @as(EndDevicePath.Subtype, @enumFromInt(self.subtype)) == .EndEntire) return null; - return @ptrCast(*DevicePathProtocol, @ptrCast([*]u8, self) + self.length); + return @as(*DevicePathProtocol, @ptrCast(@as([*]u8, @ptrCast(self)) + self.length)); } /// Calculates the total length of the device path structure in bytes, including the end of device path node. @@ -48,30 +48,30 @@ pub const DevicePathProtocol = extern struct { // DevicePathProtocol for the extra node before the end var buf = try allocator.alloc(u8, path_size + 2 * (path.len + 1) + @sizeOf(DevicePathProtocol)); - @memcpy(buf[0..path_size.len], @ptrCast([*]const u8, self)[0..path_size]); + @memcpy(buf[0..path_size.len], @as([*]const u8, @ptrCast(self))[0..path_size]); // Pointer to the copy of the end node of the current chain, which is - 4 from the buffer // as the end node itself is 4 bytes (type: u8 + subtype: u8 + length: u16). - var new = @ptrCast(*MediaDevicePath.FilePathDevicePath, buf.ptr + path_size - 4); + var new = @as(*MediaDevicePath.FilePathDevicePath, @ptrCast(buf.ptr + path_size - 4)); new.type = .Media; new.subtype = .FilePath; - new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@intCast(u16, path.len) + 1); + new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@as(u16, @intCast(path.len)) + 1); // The same as new.getPath(), but not const as we're filling it in. - var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath)); + var ptr = @as([*:0]align(1) u16, @ptrCast(@as([*]u8, @ptrCast(new)) + @sizeOf(MediaDevicePath.FilePathDevicePath))); for (path, 0..) 
|s, i| ptr[i] = s; ptr[path.len] = 0; - var end = @ptrCast(*EndDevicePath.EndEntireDevicePath, @ptrCast(*DevicePathProtocol, new).next().?); + var end = @as(*EndDevicePath.EndEntireDevicePath, @ptrCast(@as(*DevicePathProtocol, @ptrCast(new)).next().?)); end.type = .End; end.subtype = .EndEntire; end.length = @sizeOf(EndDevicePath.EndEntireDevicePath); - return @ptrCast(*DevicePathProtocol, buf.ptr); + return @as(*DevicePathProtocol, @ptrCast(buf.ptr)); } pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath { @@ -103,7 +103,7 @@ pub const DevicePathProtocol = extern struct { if (self.subtype == tag_val) { // e.g. expr = .{ .Pci = @ptrCast(...) } - return @unionInit(TUnion, subtype.name, @ptrCast(subtype.type, self)); + return @unionInit(TUnion, subtype.name, @as(subtype.type, @ptrCast(self))); } } @@ -332,7 +332,7 @@ pub const AcpiDevicePath = union(Subtype) { pub fn adrs(self: *const AdrDevicePath) []align(1) const u32 { // self.length is a minimum of 8 with one adr which is size 4. 
var entries = (self.length - 4) / @sizeOf(u32); - return @ptrCast([*]align(1) const u32, &self.adr)[0..entries]; + return @as([*]align(1) const u32, @ptrCast(&self.adr))[0..entries]; } }; @@ -550,7 +550,7 @@ pub const MessagingDevicePath = union(Subtype) { pub fn serial_number(self: *const UsbWwidDevicePath) []align(1) const u16 { var serial_len = (self.length - @sizeOf(UsbWwidDevicePath)) / @sizeOf(u16); - return @ptrCast([*]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(UsbWwidDevicePath))[0..serial_len]; + return @as([*]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(UsbWwidDevicePath)))[0..serial_len]; } }; @@ -943,7 +943,7 @@ pub const MediaDevicePath = union(Subtype) { length: u16 align(1), pub fn getPath(self: *const FilePathDevicePath) [*:0]align(1) const u16 { - return @ptrCast([*:0]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(FilePathDevicePath)); + return @as([*:0]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FilePathDevicePath))); } }; @@ -1068,7 +1068,7 @@ pub const BiosBootSpecificationDevicePath = union(Subtype) { status_flag: u16 align(1), pub fn getDescription(self: *const BBS101DevicePath) [*:0]const u8 { - return @ptrCast([*:0]const u8, self) + @sizeOf(BBS101DevicePath); + return @as([*:0]const u8, @ptrCast(self)) + @sizeOf(BBS101DevicePath); } }; diff --git a/lib/std/os/uefi/protocols/file_protocol.zig b/lib/std/os/uefi/protocols/file_protocol.zig index 729d4020b4d0..53ec5f81e37e 100644 --- a/lib/std/os/uefi/protocols/file_protocol.zig +++ b/lib/std/os/uefi/protocols/file_protocol.zig @@ -152,7 +152,7 @@ pub const FileInfo = extern struct { attribute: u64, pub fn getFileName(self: *const FileInfo) [*:0]const u16 { - return @ptrCast([*:0]const u16, @ptrCast([*]const u8, self) + @sizeOf(FileInfo)); + return @as([*:0]const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FileInfo))); } pub const efi_file_read_only: u64 = 0x0000000000000001; @@ -182,7 +182,7 @@ pub 
const FileSystemInfo = extern struct { _volume_label: u16, pub fn getVolumeLabel(self: *const FileSystemInfo) [*:0]const u16 { - return @ptrCast([*:0]const u16, &self._volume_label); + return @as([*:0]const u16, @ptrCast(&self._volume_label)); } pub const guid align(8) = Guid{ diff --git a/lib/std/os/uefi/protocols/hii.zig b/lib/std/os/uefi/protocols/hii.zig index 437fa29739eb..c7199d2950a8 100644 --- a/lib/std/os/uefi/protocols/hii.zig +++ b/lib/std/os/uefi/protocols/hii.zig @@ -39,7 +39,7 @@ pub const HIISimplifiedFontPackage = extern struct { number_of_wide_glyphs: u16, pub fn getNarrowGlyphs(self: *HIISimplifiedFontPackage) []NarrowGlyph { - return @ptrCast([*]NarrowGlyph, @ptrCast([*]u8, self) + @sizeOf(HIISimplifiedFontPackage))[0..self.number_of_narrow_glyphs]; + return @as([*]NarrowGlyph, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(HIISimplifiedFontPackage)))[0..self.number_of_narrow_glyphs]; } }; diff --git a/lib/std/os/uefi/protocols/managed_network_protocol.zig b/lib/std/os/uefi/protocols/managed_network_protocol.zig index aff9febd17bc..5ea63f5a658a 100644 --- a/lib/std/os/uefi/protocols/managed_network_protocol.zig +++ b/lib/std/os/uefi/protocols/managed_network_protocol.zig @@ -118,7 +118,7 @@ pub const ManagedNetworkTransmitData = extern struct { fragment_count: u16, pub fn getFragments(self: *ManagedNetworkTransmitData) []ManagedNetworkFragmentData { - return @ptrCast([*]ManagedNetworkFragmentData, @ptrCast([*]u8, self) + @sizeOf(ManagedNetworkTransmitData))[0..self.fragment_count]; + return @as([*]ManagedNetworkFragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(ManagedNetworkTransmitData)))[0..self.fragment_count]; } }; diff --git a/lib/std/os/uefi/protocols/udp6_protocol.zig b/lib/std/os/uefi/protocols/udp6_protocol.zig index 96a1d4c3182b..f772d38d5258 100644 --- a/lib/std/os/uefi/protocols/udp6_protocol.zig +++ b/lib/std/os/uefi/protocols/udp6_protocol.zig @@ -87,7 +87,7 @@ pub const Udp6ReceiveData = extern struct { fragment_count: 
u32, pub fn getFragments(self: *Udp6ReceiveData) []Udp6FragmentData { - return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6ReceiveData))[0..self.fragment_count]; + return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6ReceiveData)))[0..self.fragment_count]; } }; @@ -97,7 +97,7 @@ pub const Udp6TransmitData = extern struct { fragment_count: u32, pub fn getFragments(self: *Udp6TransmitData) []Udp6FragmentData { - return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6TransmitData))[0..self.fragment_count]; + return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6TransmitData)))[0..self.fragment_count]; } }; diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig index bfd3865e95c3..7fc32decb9e6 100644 --- a/lib/std/os/uefi/tables/boot_services.zig +++ b/lib/std/os/uefi/tables/boot_services.zig @@ -165,7 +165,7 @@ pub const BootServices = extern struct { try self.openProtocol( handle, &protocol.guid, - @ptrCast(*?*anyopaque, &ptr), + @as(*?*anyopaque, @ptrCast(&ptr)), // Invoking handle (loaded image) uefi.handle, // Control handle (null as not a driver) diff --git a/lib/std/os/wasi.zig b/lib/std/os/wasi.zig index 711352e2fe20..951d8ee26db8 100644 --- a/lib/std/os/wasi.zig +++ b/lib/std/os/wasi.zig @@ -103,13 +103,13 @@ pub const timespec = extern struct { const tv_sec: timestamp_t = tm / 1_000_000_000; const tv_nsec = tm - tv_sec * 1_000_000_000; return timespec{ - .tv_sec = @intCast(time_t, tv_sec), - .tv_nsec = @intCast(isize, tv_nsec), + .tv_sec = @as(time_t, @intCast(tv_sec)), + .tv_nsec = @as(isize, @intCast(tv_nsec)), }; } pub fn toTimestamp(ts: timespec) timestamp_t { - const tm = @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) + @intCast(timestamp_t, ts.tv_nsec); + const tm = @as(timestamp_t, @intCast(ts.tv_sec * 1_000_000_000)) + @as(timestamp_t, @intCast(ts.tv_nsec)); return tm; } }; diff --git a/lib/std/os/windows.zig 
b/lib/std/os/windows.zig index 421815c04d97..e12e8ac4d3bf 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -30,7 +30,7 @@ pub const gdi32 = @import("windows/gdi32.zig"); pub const winmm = @import("windows/winmm.zig"); pub const crypt32 = @import("windows/crypt32.zig"); -pub const self_process_handle = @ptrFromInt(HANDLE, maxInt(usize)); +pub const self_process_handle = @as(HANDLE, @ptrFromInt(maxInt(usize))); const Self = @This(); @@ -198,9 +198,9 @@ pub fn DeviceIoControl( var io: IO_STATUS_BLOCK = undefined; const in_ptr = if (in) |i| i.ptr else null; - const in_len = if (in) |i| @intCast(ULONG, i.len) else 0; + const in_len = if (in) |i| @as(ULONG, @intCast(i.len)) else 0; const out_ptr = if (out) |o| o.ptr else null; - const out_len = if (out) |o| @intCast(ULONG, o.len) else 0; + const out_len = if (out) |o| @as(ULONG, @intCast(o.len)) else 0; const rc = blk: { if (is_fsctl) { @@ -307,7 +307,7 @@ pub fn WaitForSingleObjectEx(handle: HANDLE, milliseconds: DWORD, alertable: boo pub fn WaitForMultipleObjectsEx(handles: []const HANDLE, waitAll: bool, milliseconds: DWORD, alertable: bool) !u32 { assert(handles.len < MAXIMUM_WAIT_OBJECTS); - const nCount: DWORD = @intCast(DWORD, handles.len); + const nCount: DWORD = @as(DWORD, @intCast(handles.len)); switch (kernel32.WaitForMultipleObjectsEx( nCount, handles.ptr, @@ -419,7 +419,7 @@ pub fn GetQueuedCompletionStatusEx( const success = kernel32.GetQueuedCompletionStatusEx( completion_port, completion_port_entries.ptr, - @intCast(ULONG, completion_port_entries.len), + @as(ULONG, @intCast(completion_port_entries.len)), &num_entries_removed, timeout_ms orelse INFINITE, @intFromBool(alertable), @@ -469,8 +469,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off 
>> 32)), }, }, .hEvent = null, @@ -480,7 +480,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo loop.beginOneEvent(); suspend { // TODO handle buffer bigger than DWORD can hold - _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped); + _ = kernel32.ReadFile(in_hFile, buffer.ptr, @as(DWORD, @intCast(buffer.len)), null, &resume_node.base.overlapped); } var bytes_transferred: DWORD = undefined; if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) { @@ -496,7 +496,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo if (offset == null) { // TODO make setting the file position non-blocking const new_off = off + bytes_transferred; - try SetFilePointerEx_CURRENT(in_hFile, @bitCast(i64, new_off)); + try SetFilePointerEx_CURRENT(in_hFile, @as(i64, @bitCast(new_off))); } return @as(usize, bytes_transferred); } else { @@ -510,8 +510,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -563,8 +563,8 @@ pub fn WriteFile( .InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -591,7 +591,7 @@ pub fn WriteFile( if (offset == null) { // TODO make setting the file position non-blocking const new_off = off + bytes_transferred; - try SetFilePointerEx_CURRENT(handle, @bitCast(i64, new_off)); + try SetFilePointerEx_CURRENT(handle, @as(i64, @bitCast(new_off))); } return bytes_transferred; } else { @@ -603,8 +603,8 @@ pub fn WriteFile( 
.InternalHigh = 0, .DUMMYUNIONNAME = .{ .DUMMYSTRUCTNAME = .{ - .Offset = @truncate(u32, off), - .OffsetHigh = @truncate(u32, off >> 32), + .Offset = @as(u32, @truncate(off)), + .OffsetHigh = @as(u32, @truncate(off >> 32)), }, }, .hEvent = null, @@ -745,19 +745,19 @@ pub fn CreateSymbolicLink( const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2; const symlink_data = SYMLINK_DATA{ .ReparseTag = IO_REPARSE_TAG_SYMLINK, - .ReparseDataLength = @intCast(u16, buf_len - header_len), + .ReparseDataLength = @as(u16, @intCast(buf_len - header_len)), .Reserved = 0, - .SubstituteNameOffset = @intCast(u16, target_path.len * 2), - .SubstituteNameLength = @intCast(u16, target_path.len * 2), + .SubstituteNameOffset = @as(u16, @intCast(target_path.len * 2)), + .SubstituteNameLength = @as(u16, @intCast(target_path.len * 2)), .PrintNameOffset = 0, - .PrintNameLength = @intCast(u16, target_path.len * 2), + .PrintNameLength = @as(u16, @intCast(target_path.len * 2)), .Flags = if (dir) |_| SYMLINK_FLAG_RELATIVE else 0, }; @memcpy(buffer[0..@sizeOf(SYMLINK_DATA)], std.mem.asBytes(&symlink_data)); - @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path)); + @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path))); const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2; - @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path)); + @memcpy(buffer[paths_start..][0 .. 
target_path.len * 2], @as([*]const u8, @ptrCast(target_path))); _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null); } @@ -827,10 +827,10 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin else => |e| return e, }; - const reparse_struct = @ptrCast(*const REPARSE_DATA_BUFFER, @alignCast(@alignOf(REPARSE_DATA_BUFFER), &reparse_buf[0])); + const reparse_struct: *const REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0])); switch (reparse_struct.ReparseTag) { IO_REPARSE_TAG_SYMLINK => { - const buf = @ptrCast(*const SYMBOLIC_LINK_REPARSE_BUFFER, @alignCast(@alignOf(SYMBOLIC_LINK_REPARSE_BUFFER), &reparse_struct.DataBuffer[0])); + const buf: *const SYMBOLIC_LINK_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; const path_buf = @as([*]const u16, &buf.PathBuffer); @@ -838,7 +838,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin return parseReadlinkPath(path_buf[offset..][0..len], is_relative, out_buffer); }, IO_REPARSE_TAG_MOUNT_POINT => { - const buf = @ptrCast(*const MOUNT_POINT_REPARSE_BUFFER, @alignCast(@alignOf(MOUNT_POINT_REPARSE_BUFFER), &reparse_struct.DataBuffer[0])); + const buf: *const MOUNT_POINT_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; const path_buf = @as([*]const u16, &buf.PathBuffer); @@ -884,7 +884,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil else FILE_NON_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT; // would we ever want to delete the target instead? 
- const path_len_bytes = @intCast(u16, sub_path_w.len * 2); + const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2)); var nt_name = UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, @@ -1020,7 +1020,7 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v // "The starting point is zero or the beginning of the file. If [FILE_BEGIN] // is specified, then the liDistanceToMove parameter is interpreted as an unsigned value." // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-setfilepointerex - const ipos = @bitCast(LARGE_INTEGER, offset); + const ipos = @as(LARGE_INTEGER, @bitCast(offset)); if (kernel32.SetFilePointerEx(handle, ipos, null, FILE_BEGIN) == 0) { switch (kernel32.GetLastError()) { .INVALID_PARAMETER => unreachable, @@ -1064,7 +1064,7 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 { } // Based on the docs for FILE_BEGIN, it seems that the returned signed integer // should be interpreted as an unsigned integer. 
- return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn QueryObjectName( @@ -1073,7 +1073,7 @@ pub fn QueryObjectName( ) ![]u16 { const out_buffer_aligned = mem.alignInSlice(out_buffer, @alignOf(OBJECT_NAME_INFORMATION)) orelse return error.NameTooLong; - const info = @ptrCast(*OBJECT_NAME_INFORMATION, out_buffer_aligned); + const info = @as(*OBJECT_NAME_INFORMATION, @ptrCast(out_buffer_aligned)); //buffer size is specified in bytes const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG); //last argument would return the length required for full_buffer, not exposed here @@ -1197,26 +1197,26 @@ pub fn GetFinalPathNameByHandle( }; defer CloseHandle(mgmt_handle); - var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]); + var input_struct = @as(*MOUNTMGR_MOUNT_POINT, @ptrCast(&input_buf[0])); input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT); - input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2); - @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr)); + input_struct.DeviceNameLength = @as(USHORT, @intCast(volume_name_u16.len * 2)); + @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. 
volume_name_u16.len * 2], @as([*]const u8, @ptrCast(volume_name_u16.ptr))); DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) { error.AccessDenied => unreachable, else => |e| return e, }; - const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, &output_buf[0]); + const mount_points_struct = @as(*const MOUNTMGR_MOUNT_POINTS, @ptrCast(&output_buf[0])); - const mount_points = @ptrCast( + const mount_points = @as( [*]const MOUNTMGR_MOUNT_POINT, - &mount_points_struct.MountPoints[0], + @ptrCast(&mount_points_struct.MountPoints[0]), )[0..mount_points_struct.NumberOfMountPoints]; for (mount_points) |mount_point| { - const symlink = @ptrCast( + const symlink = @as( [*]const u16, - @alignCast(@alignOf(u16), &output_buf[mount_point.SymbolicLinkNameOffset]), + @ptrCast(@alignCast(&output_buf[mount_point.SymbolicLinkNameOffset])), )[0 .. mount_point.SymbolicLinkNameLength / 2]; // Look for `\DosDevices\` prefix. We don't really care if there are more than one symlinks @@ -1282,7 +1282,7 @@ pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 { else => |err| return unexpectedError(err), } } - return @bitCast(u64, file_size); + return @as(u64, @bitCast(file_size)); } pub const GetFileAttributesError = error{ @@ -1313,7 +1313,7 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA { var wsadata: ws2_32.WSADATA = undefined; return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) { 0 => wsadata, - else => |err_int| switch (@enumFromInt(ws2_32.WinsockError, @intCast(u16, err_int))) { + else => |err_int| switch (@as(ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(err_int))))) { .WSASYSNOTREADY => return error.SystemNotAvailable, .WSAVERNOTSUPPORTED => return error.VersionNotSupported, .WSAEINPROGRESS => return error.BlockingOperationInProgress, @@ -1408,7 +1408,7 @@ pub fn WSASocketW( } pub fn bind(s: ws2_32.SOCKET, name: *const ws2_32.sockaddr, namelen: 
ws2_32.socklen_t) i32 { - return ws2_32.bind(s, name, @intCast(i32, namelen)); + return ws2_32.bind(s, name, @as(i32, @intCast(namelen))); } pub fn listen(s: ws2_32.SOCKET, backlog: u31) i32 { @@ -1427,15 +1427,15 @@ pub fn closesocket(s: ws2_32.SOCKET) !void { pub fn accept(s: ws2_32.SOCKET, name: ?*ws2_32.sockaddr, namelen: ?*ws2_32.socklen_t) ws2_32.SOCKET { assert((name == null) == (namelen == null)); - return ws2_32.accept(s, name, @ptrCast(?*i32, namelen)); + return ws2_32.accept(s, name, @as(?*i32, @ptrCast(namelen))); } pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 { - return ws2_32.getsockname(s, name, @ptrCast(*i32, namelen)); + return ws2_32.getsockname(s, name, @as(*i32, @ptrCast(namelen))); } pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 { - return ws2_32.getpeername(s, name, @ptrCast(*i32, namelen)); + return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen))); } pub fn sendmsg( @@ -1447,28 +1447,28 @@ pub fn sendmsg( if (ws2_32.WSASendMsg(s, msg, flags, &bytes_send, null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, bytes_send)); + return @as(i32, @as(u31, @intCast(bytes_send))); } } pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 { - var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @constCast(buf) }; + var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = @constCast(buf) }; var bytes_send: DWORD = undefined; - if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) { + if (ws2_32.WSASendTo(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_send, flags, to, @as(i32, @intCast(to_len)), null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, 
bytes_send)); + return @as(i32, @as(u31, @intCast(bytes_send))); } } pub fn recvfrom(s: ws2_32.SOCKET, buf: [*]u8, len: usize, flags: u32, from: ?*ws2_32.sockaddr, from_len: ?*ws2_32.socklen_t) i32 { - var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = buf }; + var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = buf }; var bytes_received: DWORD = undefined; var flags_inout = flags; - if (ws2_32.WSARecvFrom(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_received, &flags_inout, from, @ptrCast(?*i32, from_len), null, null) == ws2_32.SOCKET_ERROR) { + if (ws2_32.WSARecvFrom(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_received, &flags_inout, from, @as(?*i32, @ptrCast(from_len)), null, null) == ws2_32.SOCKET_ERROR) { return ws2_32.SOCKET_ERROR; } else { - return @as(i32, @intCast(u31, bytes_received)); + return @as(i32, @as(u31, @intCast(bytes_received))); } } @@ -1489,9 +1489,9 @@ pub fn WSAIoctl( s, dwIoControlCode, if (inBuffer) |i| i.ptr else null, - if (inBuffer) |i| @intCast(DWORD, i.len) else 0, + if (inBuffer) |i| @as(DWORD, @intCast(i.len)) else 0, outBuffer.ptr, - @intCast(DWORD, outBuffer.len), + @as(DWORD, @intCast(outBuffer.len)), &bytes, overlapped, completionRoutine, @@ -1741,7 +1741,7 @@ pub fn QueryPerformanceFrequency() u64 { var result: LARGE_INTEGER = undefined; assert(kernel32.QueryPerformanceFrequency(&result) != 0); // The kernel treats this integer as unsigned. - return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn QueryPerformanceCounter() u64 { @@ -1750,7 +1750,7 @@ pub fn QueryPerformanceCounter() u64 { var result: LARGE_INTEGER = undefined; assert(kernel32.QueryPerformanceCounter(&result) != 0); // The kernel treats this integer as unsigned. 
- return @bitCast(u64, result); + return @as(u64, @bitCast(result)); } pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter: ?*anyopaque, Context: ?*anyopaque) void { @@ -1852,7 +1852,7 @@ pub fn teb() *TEB { return switch (native_arch) { .x86 => blk: { if (builtin.zig_backend == .stage2_c) { - break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_windows_teb())); + break :blk @ptrCast(@alignCast(zig_x86_windows_teb())); } else { break :blk asm volatile ( \\ movl %%fs:0x18, %[ptr] @@ -1862,7 +1862,7 @@ pub fn teb() *TEB { }, .x86_64 => blk: { if (builtin.zig_backend == .stage2_c) { - break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb())); + break :blk @ptrCast(@alignCast(zig_x86_64_windows_teb())); } else { break :blk asm volatile ( \\ movq %%gs:0x30, %[ptr] @@ -1894,7 +1894,7 @@ pub fn fromSysTime(hns: i64) i128 { pub fn toSysTime(ns: i128) i64 { const hns = @divFloor(ns, 100); - return @intCast(i64, hns) - std.time.epoch.windows * (std.time.ns_per_s / 100); + return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100); } pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 { @@ -1904,22 +1904,22 @@ pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 { /// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME. 
pub fn nanoSecondsToFileTime(ns: i128) FILETIME { - const adjusted = @bitCast(u64, toSysTime(ns)); + const adjusted = @as(u64, @bitCast(toSysTime(ns))); return FILETIME{ - .dwHighDateTime = @truncate(u32, adjusted >> 32), - .dwLowDateTime = @truncate(u32, adjusted), + .dwHighDateTime = @as(u32, @truncate(adjusted >> 32)), + .dwLowDateTime = @as(u32, @truncate(adjusted)), }; } /// Compares two WTF16 strings using RtlEqualUnicodeString pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool { - const a_bytes = @intCast(u16, a.len * 2); + const a_bytes = @as(u16, @intCast(a.len * 2)); const a_string = UNICODE_STRING{ .Length = a_bytes, .MaximumLength = a_bytes, .Buffer = @constCast(a.ptr), }; - const b_bytes = @intCast(u16, b.len * 2); + const b_bytes = @as(u16, @intCast(b.len * 2)); const b_string = UNICODE_STRING{ .Length = b_bytes, .MaximumLength = b_bytes, @@ -2117,7 +2117,7 @@ pub fn wToPrefixedFileW(path: [:0]const u16) !PathSpace { .unc_absolute => nt_prefix.len + 2, else => nt_prefix.len, }; - const buf_len = @intCast(u32, path_space.data.len - path_buf_offset); + const buf_len = @as(u32, @intCast(path_space.data.len - path_buf_offset)); const path_byte_len = ntdll.RtlGetFullPathName_U( path.ptr, buf_len * 2, @@ -2263,7 +2263,7 @@ test getUnprefixedPathType { } fn getFullPathNameW(path: [*:0]const u16, out: []u16) !usize { - const result = kernel32.GetFullPathNameW(path, @intCast(u32, out.len), out.ptr, null); + const result = kernel32.GetFullPathNameW(path, @as(u32, @intCast(out.len)), out.ptr, null); if (result == 0) { switch (kernel32.GetLastError()) { else => |err| return unexpectedError(err), @@ -2284,9 +2284,9 @@ pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid: const rc = ws2_32.WSAIoctl( sock, ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER, - @ptrCast(*const anyopaque, &guid), + @as(*const anyopaque, @ptrCast(&guid)), @sizeOf(GUID), - @ptrFromInt(?*anyopaque, @intFromPtr(&function)), + @as(?*anyopaque, 
@ptrFromInt(@intFromPtr(&function))), @sizeOf(T), &num_bytes, null, @@ -2332,7 +2332,7 @@ pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError { } pub fn unexpectedWSAError(err: ws2_32.WinsockError) std.os.UnexpectedError { - return unexpectedError(@enumFromInt(Win32Error, @intFromEnum(err))); + return unexpectedError(@as(Win32Error, @enumFromInt(@intFromEnum(err)))); } /// Call this when you made a windows NtDll call @@ -2530,7 +2530,7 @@ pub fn CTL_CODE(deviceType: u16, function: u12, method: TransferType, access: u2 @intFromEnum(method); } -pub const INVALID_HANDLE_VALUE = @ptrFromInt(HANDLE, maxInt(usize)); +pub const INVALID_HANDLE_VALUE = @as(HANDLE, @ptrFromInt(maxInt(usize))); pub const INVALID_FILE_ATTRIBUTES = @as(DWORD, maxInt(DWORD)); @@ -3119,7 +3119,7 @@ pub const GUID = extern struct { bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 | try std.fmt.charToDigit(s[hex_offset + 1], 16); } - return @bitCast(GUID, bytes); + return @as(GUID, @bitCast(bytes)); } }; @@ -3150,16 +3150,16 @@ pub const KF_FLAG_SIMPLE_IDLIST = 256; pub const KF_FLAG_ALIAS_ONLY = -2147483648; pub const S_OK = 0; -pub const E_NOTIMPL = @bitCast(c_long, @as(c_ulong, 0x80004001)); -pub const E_NOINTERFACE = @bitCast(c_long, @as(c_ulong, 0x80004002)); -pub const E_POINTER = @bitCast(c_long, @as(c_ulong, 0x80004003)); -pub const E_ABORT = @bitCast(c_long, @as(c_ulong, 0x80004004)); -pub const E_FAIL = @bitCast(c_long, @as(c_ulong, 0x80004005)); -pub const E_UNEXPECTED = @bitCast(c_long, @as(c_ulong, 0x8000FFFF)); -pub const E_ACCESSDENIED = @bitCast(c_long, @as(c_ulong, 0x80070005)); -pub const E_HANDLE = @bitCast(c_long, @as(c_ulong, 0x80070006)); -pub const E_OUTOFMEMORY = @bitCast(c_long, @as(c_ulong, 0x8007000E)); -pub const E_INVALIDARG = @bitCast(c_long, @as(c_ulong, 0x80070057)); +pub const E_NOTIMPL = @as(c_long, @bitCast(@as(c_ulong, 0x80004001))); +pub const E_NOINTERFACE = @as(c_long, @bitCast(@as(c_ulong, 0x80004002))); +pub const E_POINTER = 
@as(c_long, @bitCast(@as(c_ulong, 0x80004003))); +pub const E_ABORT = @as(c_long, @bitCast(@as(c_ulong, 0x80004004))); +pub const E_FAIL = @as(c_long, @bitCast(@as(c_ulong, 0x80004005))); +pub const E_UNEXPECTED = @as(c_long, @bitCast(@as(c_ulong, 0x8000FFFF))); +pub const E_ACCESSDENIED = @as(c_long, @bitCast(@as(c_ulong, 0x80070005))); +pub const E_HANDLE = @as(c_long, @bitCast(@as(c_ulong, 0x80070006))); +pub const E_OUTOFMEMORY = @as(c_long, @bitCast(@as(c_ulong, 0x8007000E))); +pub const E_INVALIDARG = @as(c_long, @bitCast(@as(c_ulong, 0x80070057))); pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000; pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000; @@ -3221,7 +3221,7 @@ pub const LSTATUS = LONG; pub const HKEY = *opaque {}; -pub const HKEY_LOCAL_MACHINE: HKEY = @ptrFromInt(HKEY, 0x80000002); +pub const HKEY_LOCAL_MACHINE: HKEY = @as(HKEY, @ptrFromInt(0x80000002)); /// Combines the STANDARD_RIGHTS_REQUIRED, KEY_QUERY_VALUE, KEY_SET_VALUE, KEY_CREATE_SUB_KEY, /// KEY_ENUMERATE_SUB_KEYS, KEY_NOTIFY, and KEY_CREATE_LINK access rights. @@ -4685,7 +4685,7 @@ pub const KUSER_SHARED_DATA = extern struct { /// Read-only user-mode address for the shared data. 
/// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm /// https://msrc-blog.microsoft.com/2022/04/05/randomizing-the-kuser_shared_data-structure-on-windows/ -pub const SharedUserData: *const KUSER_SHARED_DATA = @ptrFromInt(*const KUSER_SHARED_DATA, 0x7FFE0000); +pub const SharedUserData: *const KUSER_SHARED_DATA = @as(*const KUSER_SHARED_DATA, @ptrFromInt(0x7FFE0000)); pub fn IsProcessorFeaturePresent(feature: PF) bool { if (@intFromEnum(feature) >= PROCESSOR_FEATURE_MAX) return false; @@ -4886,7 +4886,7 @@ pub fn WriteProcessMemory(handle: HANDLE, addr: ?LPVOID, buffer: []const u8) Wri switch (ntdll.NtWriteVirtualMemory( handle, addr, - @ptrCast(*const anyopaque, buffer.ptr), + @as(*const anyopaque, @ptrCast(buffer.ptr)), buffer.len, &nwritten, )) { @@ -4919,6 +4919,6 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE { var peb_buf: [@sizeOf(PEB)]u8 align(@alignOf(PEB)) = undefined; const peb_out = try ReadProcessMemory(handle, info.PebBaseAddress, &peb_buf); - const ppeb = @ptrCast(*const PEB, @alignCast(@alignOf(PEB), peb_out.ptr)); + const ppeb: *const PEB = @ptrCast(@alignCast(peb_out.ptr)); return ppeb.ImageBaseAddress; } diff --git a/lib/std/os/windows/user32.zig b/lib/std/os/windows/user32.zig index 0d6fc2c67037..8c492cee3273 100644 --- a/lib/std/os/windows/user32.zig +++ b/lib/std/os/windows/user32.zig @@ -1275,7 +1275,7 @@ pub const WS_EX_LAYERED = 0x00080000; pub const WS_EX_OVERLAPPEDWINDOW = WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE; pub const WS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST; -pub const CW_USEDEFAULT = @bitCast(i32, @as(u32, 0x80000000)); +pub const CW_USEDEFAULT = @as(i32, @bitCast(@as(u32, 0x80000000))); pub extern "user32" fn CreateWindowExA(dwExStyle: DWORD, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: DWORD, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, 
lpParam: ?LPVOID) callconv(WINAPI) ?HWND; pub fn createWindowExA(dwExStyle: u32, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: u32, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?*anyopaque) !HWND { diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig index 821b903a34bb..240c8c849d03 100644 --- a/lib/std/os/windows/ws2_32.zig +++ b/lib/std/os/windows/ws2_32.zig @@ -21,7 +21,7 @@ const LPARAM = windows.LPARAM; const FARPROC = windows.FARPROC; pub const SOCKET = *opaque {}; -pub const INVALID_SOCKET = @ptrFromInt(SOCKET, ~@as(usize, 0)); +pub const INVALID_SOCKET = @as(SOCKET, @ptrFromInt(~@as(usize, 0))); pub const GROUP = u32; pub const ADDRESS_FAMILY = u16; diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig index 10d8af057513..cff9eb8cf15b 100644 --- a/lib/std/packed_int_array.zig +++ b/lib/std/packed_int_array.zig @@ -73,25 +73,25 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { const tail_keep_bits = container_bits - (int_bits + head_keep_bits); //read bytes as container - const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]); + const value_ptr = @as(*align(1) const Container, @ptrCast(&bytes[start_byte])); var value = value_ptr.*; if (endian != native_endian) value = @byteSwap(value); switch (endian) { .Big => { - value <<= @intCast(Shift, head_keep_bits); - value >>= @intCast(Shift, head_keep_bits); - value >>= @intCast(Shift, tail_keep_bits); + value <<= @as(Shift, @intCast(head_keep_bits)); + value >>= @as(Shift, @intCast(head_keep_bits)); + value >>= @as(Shift, @intCast(tail_keep_bits)); }, .Little => { - value <<= @intCast(Shift, tail_keep_bits); - value >>= @intCast(Shift, tail_keep_bits); - value >>= @intCast(Shift, head_keep_bits); + value <<= @as(Shift, @intCast(tail_keep_bits)); + value >>= @as(Shift, @intCast(tail_keep_bits)); + value >>= @as(Shift, @intCast(head_keep_bits)); }, } 
- return @bitCast(Int, @truncate(UnInt, value)); + return @as(Int, @bitCast(@as(UnInt, @truncate(value)))); } /// Sets the integer at `index` to `val` within the packed data beginning @@ -115,21 +115,21 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { const head_keep_bits = bit_index - (start_byte * 8); const tail_keep_bits = container_bits - (int_bits + head_keep_bits); const keep_shift = switch (endian) { - .Big => @intCast(Shift, tail_keep_bits), - .Little => @intCast(Shift, head_keep_bits), + .Big => @as(Shift, @intCast(tail_keep_bits)), + .Little => @as(Shift, @intCast(head_keep_bits)), }; //position the bits where they need to be in the container - const value = @intCast(Container, @bitCast(UnInt, int)) << keep_shift; + const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift; //read existing bytes - const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]); + const target_ptr = @as(*align(1) Container, @ptrCast(&bytes[start_byte])); var target = target_ptr.*; if (endian != native_endian) target = @byteSwap(target); //zero the bits we want to replace in the existing bytes - const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift; + const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift; const mask = ~inv_mask; target &= mask; @@ -156,7 +156,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type { if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0); var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length); - new_slice.bit_offset = @intCast(u3, (bit_index - (start_byte * 8))); + new_slice.bit_offset = @as(u3, @intCast((bit_index - (start_byte * 8)))); return new_slice; } @@ -398,7 +398,7 @@ test "PackedIntArray init" { const PackedArray = PackedIntArray(u3, 8); var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 }); var i = @as(usize, 0); - while (i < packed_array.len) : (i += 1) 
try testing.expectEqual(@intCast(u3, i), packed_array.get(i)); + while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i)); } test "PackedIntArray initAllTo" { @@ -469,7 +469,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" { var i = @as(usize, 0); while (i < packed_array.len) : (i += 1) { - packed_array.set(i, @intCast(Int, i % limit)); + packed_array.set(i, @as(Int, @intCast(i % limit))); } //slice of array diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 25a6786ec68b..4d71ce210385 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -573,7 +573,7 @@ pub const Pdb = struct { if (this_record_len % 4 != 0) { const round_to_next_4 = (this_record_len | 0x3) + 1; const march_forward_bytes = round_to_next_4 - this_record_len; - try stream.seekBy(@intCast(isize, march_forward_bytes)); + try stream.seekBy(@as(isize, @intCast(march_forward_bytes))); this_record_len += march_forward_bytes; } @@ -689,14 +689,14 @@ pub const Pdb = struct { var symbol_i: usize = 0; while (symbol_i != module.symbols.len) { - const prefix = @ptrCast(*align(1) RecordPrefix, &module.symbols[symbol_i]); + const prefix = @as(*align(1) RecordPrefix, @ptrCast(&module.symbols[symbol_i])); if (prefix.RecordLen < 2) return null; switch (prefix.RecordKind) { .S_LPROC32, .S_GPROC32 => { - const proc_sym = @ptrCast(*align(1) ProcSym, &module.symbols[symbol_i + @sizeOf(RecordPrefix)]); + const proc_sym = @as(*align(1) ProcSym, @ptrCast(&module.symbols[symbol_i + @sizeOf(RecordPrefix)])); if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) { - return mem.sliceTo(@ptrCast([*:0]u8, &proc_sym.Name[0]), 0); + return mem.sliceTo(@as([*:0]u8, @ptrCast(&proc_sym.Name[0])), 0); } }, else => {}, @@ -715,7 +715,7 @@ pub const Pdb = struct { var skip_len: usize = undefined; const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo; while (sect_offset != subsect_info.len) : (sect_offset += skip_len) 
{ - const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &subsect_info[sect_offset]); + const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); @@ -723,7 +723,7 @@ pub const Pdb = struct { .Lines => { var line_index = sect_offset; - const line_hdr = @ptrCast(*align(1) LineFragmentHeader, &subsect_info[line_index]); + const line_hdr = @as(*align(1) LineFragmentHeader, @ptrCast(&subsect_info[line_index])); if (line_hdr.RelocSegment == 0) return error.MissingDebugInfo; line_index += @sizeOf(LineFragmentHeader); @@ -737,7 +737,7 @@ pub const Pdb = struct { const subsection_end_index = sect_offset + subsect_hdr.Length; while (line_index < subsection_end_index) { - const block_hdr = @ptrCast(*align(1) LineBlockFragmentHeader, &subsect_info[line_index]); + const block_hdr = @as(*align(1) LineBlockFragmentHeader, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineBlockFragmentHeader); const start_line_index = line_index; @@ -749,7 +749,7 @@ pub const Pdb = struct { // This is done with a simple linear search. var line_i: u32 = 0; while (line_i < block_hdr.NumLines) : (line_i += 1) { - const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[line_index]); + const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineNumberEntry); const vaddr_start = frag_vaddr_start + line_num_entry.Offset; @@ -761,7 +761,7 @@ pub const Pdb = struct { // line_i == 0 would mean that no matching LineNumberEntry was found. 
if (line_i > 0) { const subsect_index = checksum_offset + block_hdr.NameIndex; - const chksum_hdr = @ptrCast(*align(1) FileChecksumEntryHeader, &module.subsect_info[subsect_index]); + const chksum_hdr = @as(*align(1) FileChecksumEntryHeader, @ptrCast(&module.subsect_info[subsect_index])); const strtab_offset = @sizeOf(PDBStringTableHeader) + chksum_hdr.FileNameOffset; try self.string_table.?.seekTo(strtab_offset); const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024); @@ -771,13 +771,13 @@ pub const Pdb = struct { const column = if (has_column) blk: { const start_col_index = start_line_index + @sizeOf(LineNumberEntry) * block_hdr.NumLines; const col_index = start_col_index + @sizeOf(ColumnNumberEntry) * line_entry_idx; - const col_num_entry = @ptrCast(*align(1) ColumnNumberEntry, &subsect_info[col_index]); + const col_num_entry = @as(*align(1) ColumnNumberEntry, @ptrCast(&subsect_info[col_index])); break :blk col_num_entry.StartColumn; } else 0; const found_line_index = start_line_index + line_entry_idx * @sizeOf(LineNumberEntry); - const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[found_line_index]); - const flags = @ptrCast(*LineNumberEntry.Flags, &line_num_entry.Flags); + const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[found_line_index])); + const flags = @as(*LineNumberEntry.Flags, @ptrCast(&line_num_entry.Flags)); return debug.LineInfo{ .file_name = source_file_name, @@ -836,7 +836,7 @@ pub const Pdb = struct { var sect_offset: usize = 0; var skip_len: usize = undefined; while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) { - const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &mod.subsect_info[sect_offset]); + const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&mod.subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); @@ -1038,7 +1038,7 @@ const MsfStream = 
struct { } fn read(self: *MsfStream, buffer: []u8) !usize { - var block_id = @intCast(usize, self.pos / self.block_size); + var block_id = @as(usize, @intCast(self.pos / self.block_size)); if (block_id >= self.blocks.len) return 0; // End of Stream var block = self.blocks[block_id]; var offset = self.pos % self.block_size; @@ -1069,7 +1069,7 @@ const MsfStream = struct { } pub fn seekBy(self: *MsfStream, len: i64) !void { - self.pos = @intCast(u64, @intCast(i64, self.pos) + len); + self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len)); if (self.pos >= self.blocks.len * self.block_size) return error.EOF; } diff --git a/lib/std/process.zig b/lib/std/process.zig index 05066fa4361d..28d4bfcb25c0 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -68,7 +68,7 @@ pub const EnvMap = struct { pub const EnvNameHashContext = struct { fn upcase(c: u21) u21 { if (c <= std.math.maxInt(u16)) - return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@intCast(u16, c)); + return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@as(u16, @intCast(c))); return c; } @@ -80,9 +80,9 @@ pub const EnvMap = struct { while (it.nextCodepoint()) |cp| { const cp_upper = upcase(cp); h.update(&[_]u8{ - @intCast(u8, (cp_upper >> 16) & 0xff), - @intCast(u8, (cp_upper >> 8) & 0xff), - @intCast(u8, (cp_upper >> 0) & 0xff), + @as(u8, @intCast((cp_upper >> 16) & 0xff)), + @as(u8, @intCast((cp_upper >> 8) & 0xff)), + @as(u8, @intCast((cp_upper >> 0) & 0xff)), }); } return h.final(); @@ -872,8 +872,8 @@ pub fn argsFree(allocator: Allocator, args_alloc: []const [:0]u8) void { for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len + 1; } - const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes]; - const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf); + const unaligned_allocated_buf = @as([*]const u8, @ptrCast(args_alloc.ptr))[0..total_bytes]; + const aligned_allocated_buf: []align(@alignOf([]u8)) const u8 = 
@alignCast(unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); + break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); @@ -1175,7 +1175,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize { error.NameTooLong, error.UnknownName => unreachable, else => return error.UnknownTotalSystemMemory, }; - return @intCast(usize, physmem); + return @as(usize, @intCast(physmem)); }, .openbsd => { const mib: [2]c_int = [_]c_int{ @@ -1192,7 +1192,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize { else => return error.UnknownTotalSystemMemory, }; assert(physmem >= 0); - return @bitCast(usize, physmem); + return @as(usize, @bitCast(physmem)); }, .windows => { var sbi: std.os.windows.SYSTEM_BASIC_INFORMATION = undefined; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index f07562c91167..84dc9d2daf77 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -41,8 +41,7 @@ pub const Random = struct { assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct const gen = struct { fn fill(ptr: *anyopaque, buf: []u8) void { - const alignment = @typeInfo(Ptr).Pointer.alignment; - const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + const self: Ptr = @ptrCast(@alignCast(ptr)); fillFn(self, buf); } }; @@ -97,7 +96,7 @@ pub const Random = struct { r.uintLessThan(Index, values.len); const MinInt = MinArrayIndex(Index); - return values[@intCast(MinInt, index)]; + return values[@as(MinInt, @intCast(index))]; } /// Returns a random int `i` such that 
`minInt(T) <= i <= maxInt(T)`. @@ -114,8 +113,8 @@ pub const Random = struct { // TODO: endian portability is pointless if the underlying prng isn't endian portable. // TODO: document the endian portability of this library. const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes); - const unsigned_result = @truncate(UnsignedT, byte_aligned_result); - return @bitCast(T, unsigned_result); + const unsigned_result = @as(UnsignedT, @truncate(byte_aligned_result)); + return @as(T, @bitCast(unsigned_result)); } /// Constant-time implementation off `uintLessThan`. @@ -126,9 +125,9 @@ pub const Random = struct { comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! assert(0 < less_than); if (bits <= 32) { - return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than)); + return @as(T, @intCast(limitRangeBiased(u32, r.int(u32), less_than))); } else { - return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than)); + return @as(T, @intCast(limitRangeBiased(u64, r.int(u64), less_than))); } } @@ -156,7 +155,7 @@ pub const Random = struct { // "Lemire's (with an extra tweak from me)" var x: Small = r.int(Small); var m: Large = @as(Large, x) * @as(Large, less_than); - var l: Small = @truncate(Small, m); + var l: Small = @as(Small, @truncate(m)); if (l < less_than) { var t: Small = -%less_than; @@ -169,10 +168,10 @@ pub const Random = struct { while (l < t) { x = r.int(Small); m = @as(Large, x) * @as(Large, less_than); - l = @truncate(Small, m); + l = @as(Small, @truncate(m)); } } - return @intCast(T, m >> small_bits); + return @as(T, @intCast(m >> small_bits)); } /// Constant-time implementation off `uintAtMost`. @@ -206,10 +205,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. 
const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, less_than); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintLessThanBiased(T, less_than - at_least); @@ -225,10 +224,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, less_than); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThan(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintLessThan(T, less_than - at_least); @@ -243,10 +242,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, at_most); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. 
return at_least + r.uintAtMostBiased(T, at_most - at_least); @@ -262,10 +261,10 @@ pub const Random = struct { if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); - const lo = @bitCast(UnsignedT, at_least); - const hi = @bitCast(UnsignedT, at_most); + const lo = @as(UnsignedT, @bitCast(at_least)); + const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMost(UnsignedT, hi -% lo); - return @bitCast(T, result); + return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintAtMost(T, at_most - at_least); @@ -294,9 +293,9 @@ pub const Random = struct { rand_lz += @clz(r.int(u32) | 0x7FF); } } - const mantissa = @truncate(u23, rand); + const mantissa = @as(u23, @truncate(rand)); const exponent = @as(u32, 126 - rand_lz) << 23; - return @bitCast(f32, exponent | mantissa); + return @as(f32, @bitCast(exponent | mantissa)); }, f64 => { // Use 52 random bits for the mantissa, and the rest for the exponent. 
@@ -321,7 +320,7 @@ pub const Random = struct { } const mantissa = rand & 0xFFFFFFFFFFFFF; const exponent = (1022 - rand_lz) << 52; - return @bitCast(f64, exponent | mantissa); + return @as(f64, @bitCast(exponent | mantissa)); }, else => @compileError("unknown floating point type"), } @@ -333,7 +332,7 @@ pub const Random = struct { pub fn floatNorm(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.NormDist); switch (T) { - f32 => return @floatCast(f32, value), + f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } @@ -345,7 +344,7 @@ pub const Random = struct { pub fn floatExp(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.ExpDist); switch (T) { - f32 => return @floatCast(f32, value), + f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } @@ -379,10 +378,10 @@ pub const Random = struct { } // `i <= j < max <= maxInt(MinInt)` - const max = @intCast(MinInt, buf.len); + const max = @as(MinInt, @intCast(buf.len)); var i: MinInt = 0; while (i < max - 1) : (i += 1) { - const j = @intCast(MinInt, r.intRangeLessThan(Index, i, max)); + const j = @as(MinInt, @intCast(r.intRangeLessThan(Index, i, max))); mem.swap(T, &buf[i], &buf[j]); } } @@ -445,7 +444,7 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T { // http://www.pcg-random.org/posts/bounded-rands.html // "Integer Multiplication (Biased)" var m: T2 = @as(T2, random_int) * @as(T2, less_than); - return @intCast(T, m >> bits); + return @as(T, @intCast(m >> bits)); } // Generator to extend 64-bit seed values into longer sequences. 
diff --git a/lib/std/rand/Isaac64.zig b/lib/std/rand/Isaac64.zig index 8c6205e1cd9a..785c551dfdf6 100644 --- a/lib/std/rand/Isaac64.zig +++ b/lib/std/rand/Isaac64.zig @@ -38,10 +38,10 @@ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: const x = self.m[base + m1]; self.a = mix +% self.m[base + m2]; - const y = self.a +% self.b +% self.m[@intCast(usize, (x >> 3) % self.m.len)]; + const y = self.a +% self.b +% self.m[@as(usize, @intCast((x >> 3) % self.m.len))]; self.m[base + m1] = y; - self.b = x +% self.m[@intCast(usize, (y >> 11) % self.m.len)]; + self.b = x +% self.m[@as(usize, @intCast((y >> 11) % self.m.len))]; self.r[self.r.len - 1 - base - m1] = self.b; } @@ -159,7 +159,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -168,7 +168,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Pcg.zig b/lib/std/rand/Pcg.zig index 951713cc40ff..ceeadeab5c10 100644 --- a/lib/std/rand/Pcg.zig +++ b/lib/std/rand/Pcg.zig @@ -29,10 +29,10 @@ fn next(self: *Pcg) u32 { const l = self.s; self.s = l *% default_multiplier +% (self.i | 1); - const xor_s = @truncate(u32, ((l >> 18) ^ l) >> 27); - const rot = @intCast(u32, l >> 59); + const xor_s = @as(u32, @truncate(((l >> 18) ^ l) >> 27)); + const rot = @as(u32, @intCast(l >> 59)); - return (xor_s >> @intCast(u5, rot)) | (xor_s << @intCast(u5, (0 -% rot) & 31)); + return (xor_s >> @as(u5, @intCast(rot))) | (xor_s << @as(u5, @intCast((0 -% rot) & 31))); } fn seed(self: *Pcg, init_s: u64) void { @@ -58,7 +58,7 @@ pub fn fill(self: *Pcg, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 4) : (j += 1) { - buf[i + j] = @truncate(u8, 
n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -67,7 +67,7 @@ pub fn fill(self: *Pcg, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/RomuTrio.zig b/lib/std/rand/RomuTrio.zig index ff7b4deac1af..4ce2b7af016b 100644 --- a/lib/std/rand/RomuTrio.zig +++ b/lib/std/rand/RomuTrio.zig @@ -34,7 +34,7 @@ fn next(self: *RomuTrio) u64 { } pub fn seedWithBuf(self: *RomuTrio, buf: [24]u8) void { - const seed_buf = @bitCast([3]u64, buf); + const seed_buf = @as([3]u64, @bitCast(buf)); self.x_state = seed_buf[0]; self.y_state = seed_buf[1]; self.z_state = seed_buf[2]; @@ -58,7 +58,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -67,7 +67,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } @@ -122,7 +122,7 @@ test "RomuTrio fill" { } test "RomuTrio buf seeding test" { - const buf0 = @bitCast([24]u8, [3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 }); + const buf0 = @as([24]u8, @bitCast([3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 })); const resulting_state = .{ .x = 16294208416658607535, .y = 13964609475759908645, .z = 4703697494102998476 }; var r = RomuTrio.init(0); r.seedWithBuf(buf0); diff --git a/lib/std/rand/Sfc64.zig b/lib/std/rand/Sfc64.zig index a5e6920df723..af439b115b72 100644 --- a/lib/std/rand/Sfc64.zig +++ b/lib/std/rand/Sfc64.zig @@ -56,7 +56,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 
8; } } @@ -65,7 +65,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Xoroshiro128.zig b/lib/std/rand/Xoroshiro128.zig index 6ddd2eb89e55..56c4980e6d50 100644 --- a/lib/std/rand/Xoroshiro128.zig +++ b/lib/std/rand/Xoroshiro128.zig @@ -45,7 +45,7 @@ pub fn jump(self: *Xoroshiro128) void { inline for (table) |entry| { var b: usize = 0; while (b < 64) : (b += 1) { - if ((entry & (@as(u64, 1) << @intCast(u6, b))) != 0) { + if ((entry & (@as(u64, 1) << @as(u6, @intCast(b)))) != 0) { s0 ^= self.s[0]; s1 ^= self.s[1]; } @@ -74,7 +74,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } @@ -83,7 +83,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/Xoshiro256.zig b/lib/std/rand/Xoshiro256.zig index 35af701ea123..c72d9ee1a251 100644 --- a/lib/std/rand/Xoshiro256.zig +++ b/lib/std/rand/Xoshiro256.zig @@ -46,13 +46,13 @@ pub fn jump(self: *Xoshiro256) void { var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba; while (table != 0) : (table >>= 1) { - if (@truncate(u1, table) != 0) { - s ^= @bitCast(u256, self.s); + if (@as(u1, @truncate(table)) != 0) { + s ^= @as(u256, @bitCast(self.s)); } _ = self.next(); } - self.s = @bitCast([4]u64, s); + self.s = @as([4]u64, @bitCast(s)); } pub fn seed(self: *Xoshiro256, init_s: u64) void { @@ -74,7 +74,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { - buf[i + j] = @truncate(u8, n); + buf[i + j] = @as(u8, 
@truncate(n)); n >>= 8; } } @@ -83,7 +83,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void { if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { - buf[i] = @truncate(u8, n); + buf[i] = @as(u8, @truncate(n)); n >>= 8; } } diff --git a/lib/std/rand/benchmark.zig b/lib/std/rand/benchmark.zig index ea3de9c70d9c..530556517c67 100644 --- a/lib/std/rand/benchmark.zig +++ b/lib/std/rand/benchmark.zig @@ -91,8 +91,8 @@ pub fn benchmark(comptime H: anytype, bytes: usize, comptime block_size: usize) } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); std.debug.assert(rng.random().int(u64) != 0); diff --git a/lib/std/rand/test.zig b/lib/std/rand/test.zig index 6cc6891c5aa0..551e47f8ff78 100644 --- a/lib/std/rand/test.zig +++ b/lib/std/rand/test.zig @@ -332,13 +332,13 @@ test "Random float chi-square goodness of fit" { while (i < num_numbers) : (i += 1) { const rand_f32 = random.float(f32); const rand_f64 = random.float(f64); - var f32_put = try f32_hist.getOrPut(@intFromFloat(u32, rand_f32 * @floatFromInt(f32, num_buckets))); + var f32_put = try f32_hist.getOrPut(@as(u32, @intFromFloat(rand_f32 * @as(f32, @floatFromInt(num_buckets))))); if (f32_put.found_existing) { f32_put.value_ptr.* += 1; } else { f32_put.value_ptr.* = 1; } - var f64_put = try f64_hist.getOrPut(@intFromFloat(u32, rand_f64 * @floatFromInt(f64, num_buckets))); + var f64_put = try f64_hist.getOrPut(@as(u32, @intFromFloat(rand_f64 * @as(f64, @floatFromInt(num_buckets))))); if (f64_put.found_existing) { f64_put.value_ptr.* += 1; } else { @@ -352,8 +352,8 @@ test "Random float chi-square goodness of fit" { { var j: u32 = 0; while (j < num_buckets) : (j += 1) { - const count = @floatFromInt(f64, (if 
(f32_hist.get(j)) |v| v else 0)); - const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets); + const count = @as(f64, @floatFromInt((if (f32_hist.get(j)) |v| v else 0))); + const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets)); const delta = count - expected; const variance = (delta * delta) / expected; f32_total_variance += variance; @@ -363,8 +363,8 @@ test "Random float chi-square goodness of fit" { { var j: u64 = 0; while (j < num_buckets) : (j += 1) { - const count = @floatFromInt(f64, (if (f64_hist.get(j)) |v| v else 0)); - const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets); + const count = @as(f64, @floatFromInt((if (f64_hist.get(j)) |v| v else 0))); + const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets)); const delta = count - expected; const variance = (delta * delta) / expected; f64_total_variance += variance; @@ -421,13 +421,13 @@ fn testRange(r: Random, start: i8, end: i8) !void { try testRangeBias(r, start, end, false); } fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void { - const count = @intCast(usize, @as(i32, end) - @as(i32, start)); + const count = @as(usize, @intCast(@as(i32, end) - @as(i32, start))); var values_buffer = [_]bool{false} ** 0x100; const values = values_buffer[0..count]; var i: usize = 0; while (i < count) { const value: i32 = if (biased) r.intRangeLessThanBiased(i8, start, end) else r.intRangeLessThan(i8, start, end); - const index = @intCast(usize, value - start); + const index = @as(usize, @intCast(value - start)); if (!values[index]) { i += 1; values[index] = true; diff --git a/lib/std/rand/ziggurat.zig b/lib/std/rand/ziggurat.zig index afe00a1348b3..09d695b88d6d 100644 --- a/lib/std/rand/ziggurat.zig +++ b/lib/std/rand/ziggurat.zig @@ -18,17 +18,17 @@ pub fn next_f64(random: Random, comptime tables: ZigTable) f64 { // We manually construct a float from parts as we can avoid 
an extra random lookup here by // using the unused exponent for the lookup table entry. const bits = random.int(u64); - const i = @as(usize, @truncate(u8, bits)); + const i = @as(usize, @as(u8, @truncate(bits))); const u = blk: { if (tables.is_symmetric) { // Generate a value in the range [2, 4) and scale into [-1, 1) const repr = ((0x3ff + 1) << 52) | (bits >> 12); - break :blk @bitCast(f64, repr) - 3.0; + break :blk @as(f64, @bitCast(repr)) - 3.0; } else { // Generate a value in the range [1, 2) and scale into (0, 1) const repr = (0x3ff << 52) | (bits >> 12); - break :blk @bitCast(f64, repr) - (1.0 - math.floatEps(f64) / 2.0); + break :blk @as(f64, @bitCast(repr)) - (1.0 - math.floatEps(f64) / 2.0); } }; diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index 172fe4e7c382..1c9cffa766fb 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -107,7 +107,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } pub fn deinit(self: *Self, allocator: Allocator) void { - self.freeShelves(allocator, @intCast(ShelfIndex, self.dynamic_segments.len), 0); + self.freeShelves(allocator, @as(ShelfIndex, @intCast(self.dynamic_segments.len)), 0); allocator.free(self.dynamic_segments); self.* = undefined; } @@ -171,7 +171,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// TODO update this and related methods to match the conventions set by ArrayList pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { if (prealloc_item_count != 0) { - if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) { + if (new_capacity <= @as(usize, 1) << (prealloc_exp + @as(ShelfIndex, @intCast(self.dynamic_segments.len)))) { return self.shrinkCapacity(allocator, new_capacity); } } @@ -181,7 +181,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// Only grows capacity, 
or retains current capacity. pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void { const new_cap_shelf_count = shelfCount(new_capacity); - const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); if (new_cap_shelf_count <= old_shelf_count) return; const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count); @@ -206,7 +206,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type /// It may fail to reduce the capacity in which case the capacity will remain unchanged. pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void { if (new_capacity <= prealloc_item_count) { - const len = @intCast(ShelfIndex, self.dynamic_segments.len); + const len = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); self.freeShelves(allocator, len, 0); allocator.free(self.dynamic_segments); self.dynamic_segments = &[_][*]T{}; @@ -214,7 +214,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } const new_cap_shelf_count = shelfCount(new_capacity); - const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len); + const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len)); assert(new_cap_shelf_count <= old_shelf_count); if (new_cap_shelf_count == old_shelf_count) return; @@ -424,7 +424,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { { var i: usize = 0; while (i < 100) : (i += 1) { - try list.append(testing.allocator, @intCast(i32, i + 1)); + try list.append(testing.allocator, @as(i32, @intCast(i + 1))); try testing.expect(list.len == i + 1); } } @@ -432,7 +432,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { { var i: usize = 0; while (i < 100) : (i += 1) { - try testing.expect(list.at(i).* == @intCast(i32, i + 1)); + try testing.expect(list.at(i).* == @as(i32, @intCast(i + 1))); } } @@ -492,7 
+492,7 @@ fn testSegmentedList(comptime prealloc: usize) !void { var i: i32 = 0; while (i < 100) : (i += 1) { try list.append(testing.allocator, i + 1); - control[@intCast(usize, i)] = i + 1; + control[@as(usize, @intCast(i))] = i + 1; } @memset(dest[0..], 0); diff --git a/lib/std/simd.zig b/lib/std/simd.zig index 78d24a80bfa6..b3a50168ff0f 100644 --- a/lib/std/simd.zig +++ b/lib/std/simd.zig @@ -93,8 +93,8 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) { var out: [len]T = undefined; for (&out, 0..) |*element, i| { element.* = switch (@typeInfo(T)) { - .Int => @intCast(T, i), - .Float => @floatFromInt(T, i), + .Int => @as(T, @intCast(i)), + .Float => @as(T, @floatFromInt(i)), else => @compileError("Can't use type " ++ @typeName(T) ++ " in iota."), }; } @@ -107,7 +107,7 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) { pub fn repeat(comptime len: usize, vec: anytype) @Vector(len, std.meta.Child(@TypeOf(vec))) { const Child = std.meta.Child(@TypeOf(vec)); - return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @intCast(i32, vectorLength(@TypeOf(vec))))); + return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @as(i32, @intCast(vectorLength(@TypeOf(vec)))))); } /// Returns a vector containing all elements of the first vector at the lower indices followed by all elements of the second vector @@ -139,8 +139,8 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le const a_vec_count = (1 + vecs_arr.len) >> 1; const b_vec_count = vecs_arr.len >> 1; - const a = interlace(@ptrCast(*const [a_vec_count]VecType, vecs_arr[0..a_vec_count]).*); - const b = interlace(@ptrCast(*const [b_vec_count]VecType, vecs_arr[a_vec_count..]).*); + const a = interlace(@as(*const [a_vec_count]VecType, @ptrCast(vecs_arr[0..a_vec_count])).*); + const b = interlace(@as(*const [b_vec_count]VecType, @ptrCast(vecs_arr[a_vec_count..])).*); const a_len = vectorLength(@TypeOf(a)); const 
b_len = vectorLength(@TypeOf(b)); @@ -148,10 +148,10 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le const indices = comptime blk: { const count_up = iota(i32, len); - const cycle = @divFloor(count_up, @splat(len, @intCast(i32, vecs_arr.len))); + const cycle = @divFloor(count_up, @splat(len, @as(i32, @intCast(vecs_arr.len)))); const select_mask = repeat(len, join(@splat(a_vec_count, true), @splat(b_vec_count, false))); - const a_indices = count_up - cycle * @splat(len, @intCast(i32, b_vec_count)); - const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @intCast(i32, a_vec_count)), a_vec_count, 0); + const a_indices = count_up - cycle * @splat(len, @as(i32, @intCast(b_vec_count))); + const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @as(i32, @intCast(a_vec_count))), a_vec_count, 0); break :blk @select(i32, select_mask, a_indices, ~b_indices); }; @@ -174,7 +174,7 @@ pub fn deinterlace( comptime var i: usize = 0; // for-loops don't work for this, apparently. 
inline while (i < out.len) : (i += 1) { - const indices = comptime iota(i32, vec_len) * @splat(vec_len, @intCast(i32, vec_count)) + @splat(vec_len, @intCast(i32, i)); + const indices = comptime iota(i32, vec_len) * @splat(vec_len, @as(i32, @intCast(vec_count))) + @splat(vec_len, @as(i32, @intCast(i))); out[i] = @shuffle(Child, interlaced, undefined, indices); } @@ -189,9 +189,9 @@ pub fn extract( const Child = std.meta.Child(@TypeOf(vec)); const len = vectorLength(@TypeOf(vec)); - std.debug.assert(@intCast(comptime_int, first) + @intCast(comptime_int, count) <= len); + std.debug.assert(@as(comptime_int, @intCast(first)) + @as(comptime_int, @intCast(count)) <= len); - return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @intCast(i32, first))); + return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @as(i32, @intCast(first)))); } test "vector patterns" { @@ -263,7 +263,7 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) { const Child = std.meta.Child(@TypeOf(vec)); const len = vectorLength(@TypeOf(vec)); - return @shuffle(Child, vec, undefined, @splat(len, @intCast(i32, len) - 1) - iota(i32, len)); + return @shuffle(Child, vec, undefined, @splat(len, @as(i32, @intCast(len)) - 1) - iota(i32, len)); } test "vector shifting" { diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig index 23678a79c6b6..795dd29fc5df 100644 --- a/lib/std/sort/pdq.zig +++ b/lib/std/sort/pdq.zig @@ -251,7 +251,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void { const len = b - a; if (len < 8) return; - var rand = @intCast(u64, len); + var rand = @as(u64, @intCast(len)); const modulus = math.ceilPowerOfTwoAssert(u64, len); var i = a + (len / 4) * 2 - 1; @@ -261,7 +261,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void { rand ^= rand >> 7; rand ^= rand << 17; - var other = @intCast(usize, rand & (modulus - 1)); + var other = @as(usize, @intCast(rand & (modulus - 1))); if (other >= len) other -= len; context.swap(i, a + other); 
} diff --git a/lib/std/start.zig b/lib/std/start.zig index 9c83bd881c81..d81eb4f9e953 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -190,7 +190,7 @@ fn exit2(code: usize) noreturn { else => @compileError("TODO"), }, .windows => { - ExitProcess(@truncate(u32, code)); + ExitProcess(@as(u32, @truncate(code))); }, else => @compileError("TODO"), } @@ -387,23 +387,23 @@ fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn { std.debug.maybeEnableSegfaultHandler(); const result: std.os.windows.INT = initEventLoopAndCallWinMain(); - std.os.windows.kernel32.ExitProcess(@bitCast(std.os.windows.UINT, result)); + std.os.windows.kernel32.ExitProcess(@as(std.os.windows.UINT, @bitCast(result))); } fn posixCallMainAndExit() callconv(.C) noreturn { @setAlignStack(16); const argc = argc_argv_ptr[0]; - const argv = @ptrCast([*][*:0]u8, argc_argv_ptr + 1); + const argv = @as([*][*:0]u8, @ptrCast(argc_argv_ptr + 1)); - const envp_optional = @ptrCast([*:null]?[*:0]u8, @alignCast(@alignOf(usize), argv + argc + 1)); + const envp_optional: [*:null]?[*:0]u8 = @ptrCast(@alignCast(argv + argc + 1)); var envp_count: usize = 0; while (envp_optional[envp_count]) |_| : (envp_count += 1) {} - const envp = @ptrCast([*][*:0]u8, envp_optional)[0..envp_count]; + const envp = @as([*][*:0]u8, @ptrCast(envp_optional))[0..envp_count]; if (native_os == .linux) { // Find the beginning of the auxiliary vector - const auxv = @ptrCast([*]elf.Auxv, @alignCast(@alignOf(usize), envp.ptr + envp_count + 1)); + const auxv: [*]elf.Auxv = @ptrCast(@alignCast(envp.ptr + envp_count + 1)); std.os.linux.elf_aux_maybe = auxv; var at_hwcap: usize = 0; @@ -419,7 +419,7 @@ fn posixCallMainAndExit() callconv(.C) noreturn { else => continue, } } - break :init @ptrFromInt([*]elf.Phdr, at_phdr)[0..at_phnum]; + break :init @as([*]elf.Phdr, @ptrFromInt(at_phdr))[0..at_phnum]; }; // Apply the initial relocations as early as possible in the startup @@ -495,20 +495,20 @@ fn callMainWithArgs(argc: usize, argv: 
[*][*:0]u8, envp: [][*:0]u8) u8 { fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) callconv(.C) c_int { var env_count: usize = 0; while (c_envp[env_count] != null) : (env_count += 1) {} - const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count]; + const envp = @as([*][*:0]u8, @ptrCast(c_envp))[0..env_count]; if (builtin.os.tag == .linux) { const at_phdr = std.c.getauxval(elf.AT_PHDR); const at_phnum = std.c.getauxval(elf.AT_PHNUM); - const phdrs = (@ptrFromInt([*]elf.Phdr, at_phdr))[0..at_phnum]; + const phdrs = (@as([*]elf.Phdr, @ptrFromInt(at_phdr)))[0..at_phnum]; expandStackSize(phdrs); } - return @call(.always_inline, callMainWithArgs, .{ @intCast(usize, c_argc), @ptrCast([*][*:0]u8, c_argv), envp }); + return @call(.always_inline, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp }); } fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.C) c_int { - std.os.argv = @ptrCast([*][*:0]u8, c_argv)[0..@intCast(usize, c_argc)]; + std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@as(usize, @intCast(c_argc))]; return @call(.always_inline, callMain, .{}); } @@ -629,7 +629,7 @@ pub fn callMain() u8 { pub fn call_wWinMain() std.os.windows.INT { const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.params[0].type.?; - const hInstance = @ptrCast(MAIN_HINSTANCE, std.os.windows.kernel32.GetModuleHandleW(null).?); + const hInstance = @as(MAIN_HINSTANCE, @ptrCast(std.os.windows.kernel32.GetModuleHandleW(null).?)); const lpCmdLine = std.os.windows.kernel32.GetCommandLineW(); // There's no (documented) way to get the nCmdShow parameter, so we're diff --git a/lib/std/start_windows_tls.zig b/lib/std/start_windows_tls.zig index a1cd8387dca8..48880b4811d3 100644 --- a/lib/std/start_windows_tls.zig +++ b/lib/std/start_windows_tls.zig @@ -42,7 +42,7 @@ export const _tls_used linksection(".rdata$T") = IMAGE_TLS_DIRECTORY{ .StartAddressOfRawData = &_tls_start, .EndAddressOfRawData = &_tls_end, 
.AddressOfIndex = &_tls_index, - .AddressOfCallBacks = @ptrCast(*anyopaque, &__xl_a), + .AddressOfCallBacks = @as(*anyopaque, @ptrCast(&__xl_a)), .SizeOfZeroFill = 0, .Characteristics = 0, }; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 688d0935871d..bc9a22fb7c48 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -70,8 +70,8 @@ pub const Header = struct { } pub fn fileType(header: Header) FileType { - const result = @enumFromInt(FileType, header.bytes[156]); - return if (result == @enumFromInt(FileType, 0)) .normal else result; + const result = @as(FileType, @enumFromInt(header.bytes[156])); + return if (result == @as(FileType, @enumFromInt(0))) .normal else result; } fn str(header: Header, start: usize, end: usize) []const u8 { @@ -117,7 +117,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi start += 512; const file_size = try header.fileSize(); const rounded_file_size = std.mem.alignForward(u64, file_size, 512); - const pad_len = @intCast(usize, rounded_file_size - file_size); + const pad_len = @as(usize, @intCast(rounded_file_size - file_size)); const unstripped_file_name = try header.fullFileName(&file_name_buffer); switch (header.fileType()) { .directory => { @@ -146,14 +146,14 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi } // Ask for the rounded up file size + 512 for the next header. 
// TODO: https://github.com/ziglang/zig/issues/14039 - const ask = @intCast(usize, @min( + const ask = @as(usize, @intCast(@min( buffer.len - end, rounded_file_size + 512 - file_off -| (end - start), - )); + ))); end += try reader.readAtLeast(buffer[end..], ask); if (end - start < ask) return error.UnexpectedEndOfStream; // TODO: https://github.com/ziglang/zig/issues/14039 - const slice = buffer[start..@intCast(usize, @min(file_size - file_off + start, end))]; + const slice = buffer[start..@as(usize, @intCast(@min(file_size - file_off + start, end)))]; try file.writeAll(slice); file_off += slice.len; start += slice.len; @@ -167,7 +167,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi }, .global_extended_header, .extended_header => { if (start + rounded_file_size > end) return error.TarHeadersTooBig; - start = @intCast(usize, start + rounded_file_size); + start = @as(usize, @intCast(start + rounded_file_size)); }, .hard_link => return error.TarUnsupportedFileType, .symbolic_link => return error.TarUnsupportedFileType, diff --git a/lib/std/target.zig b/lib/std/target.zig index ec6129236072..2a96e84001eb 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -711,14 +711,14 @@ pub const Target = struct { pub fn isEnabled(set: Set, arch_feature_index: Index) bool { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); return (set.ints[usize_index] & (@as(usize, 1) << bit_index)) != 0; } /// Adds the specified feature but not its dependencies. 
pub fn addFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] |= @as(usize, 1) << bit_index; } @@ -730,7 +730,7 @@ pub const Target = struct { /// Removes the specified feature but not its dependents. pub fn removeFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); - const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize)); + const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] &= ~(@as(usize, 1) << bit_index); } @@ -745,7 +745,7 @@ pub const Target = struct { var old = set.ints; while (true) { for (all_features_list, 0..) |feature, index_usize| { - const index = @intCast(Index, index_usize); + const index = @as(Index, @intCast(index_usize)); if (set.isEnabled(index)) { set.addFeatureSet(feature.dependencies); } @@ -757,7 +757,7 @@ pub const Target = struct { } pub fn asBytes(set: *const Set) *const [byte_count]u8 { - return @ptrCast(*const [byte_count]u8, &set.ints); + return @as(*const [byte_count]u8, @ptrCast(&set.ints)); } pub fn eql(set: Set, other_set: Set) bool { @@ -1526,7 +1526,7 @@ pub const Target = struct { pub fn set(self: *DynamicLinker, dl_or_null: ?[]const u8) void { if (dl_or_null) |dl| { @memcpy(self.buffer[0..dl.len], dl); - self.max_byte = @intCast(u8, dl.len - 1); + self.max_byte = @as(u8, @intCast(dl.len - 1)); } else { self.max_byte = null; } @@ -1537,12 +1537,12 @@ pub const Target = struct { var result: DynamicLinker = .{}; const S = struct { fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker { - r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1); + r.max_byte = @as(u8, 
@intCast((std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1)); return r.*; } fn copy(r: *DynamicLinker, s: []const u8) DynamicLinker { @memcpy(r.buffer[0..s.len], s); - r.max_byte = @intCast(u8, s.len - 1); + r.max_byte = @as(u8, @intCast(s.len - 1)); return r.*; } }; @@ -1970,7 +1970,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), + 80 => @as(u16, @intCast(mem.alignForward(usize, 10, c_type_alignment(t, .longdouble)))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 2cdb78cd1d6d..313af987ab5b 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -63,7 +63,7 @@ pub const FailingAllocator = struct { log2_ptr_align: u8, return_address: usize, ) ?[*]u8 { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); if (self.index == self.fail_index) { if (!self.has_induced_failure) { @memset(&self.stack_addresses, 0); @@ -91,7 +91,7 @@ pub const FailingAllocator = struct { new_len: usize, ra: usize, ) bool { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra)) return false; if (new_len < old_mem.len) { @@ -108,7 +108,7 @@ pub const FailingAllocator = struct { log2_old_align: u8, ra: usize, ) void { - const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx)); + const self: *FailingAllocator = @ptrCast(@alignCast(ctx)); self.internal_allocator.rawFree(old_mem, log2_old_align, ra); self.deallocations += 1; self.freed_bytes += old_mem.len; diff --git a/lib/std/time.zig b/lib/std/time.zig index 3eb342fa85a7..a60a0ef95912 100644 --- 
a/lib/std/time.zig +++ b/lib/std/time.zig @@ -70,7 +70,7 @@ pub fn timestamp() i64 { /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn milliTimestamp() i64 { - return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_ms)); + return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms))); } /// Get a calendar timestamp, in microseconds, relative to UTC 1970-01-01. @@ -79,7 +79,7 @@ pub fn milliTimestamp() i64 { /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn microTimestamp() i64 { - return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_us)); + return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_us))); } /// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01. @@ -96,7 +96,7 @@ pub fn nanoTimestamp() i128 { var ft: os.windows.FILETIME = undefined; os.windows.kernel32.GetSystemTimeAsFileTime(&ft); const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - return @as(i128, @bitCast(i64, ft64) + epoch_adj) * 100; + return @as(i128, @as(i64, @bitCast(ft64)) + epoch_adj) * 100; } if (builtin.os.tag == .wasi and !builtin.link_libc) { @@ -239,9 +239,9 @@ pub const Instant = struct { } // Convert to ns using fixed point. 
- const scale = @as(u64, std.time.ns_per_s << 32) / @intCast(u32, qpf); + const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf)); const result = (@as(u96, qpc) * scale) >> 32; - return @truncate(u64, result); + return @as(u64, @truncate(result)); } // WASI timestamps are directly in nanoseconds @@ -250,9 +250,9 @@ pub const Instant = struct { } // Convert timespec diff to ns - const seconds = @intCast(u64, self.timestamp.tv_sec - earlier.timestamp.tv_sec); - const elapsed = (seconds * ns_per_s) + @intCast(u32, self.timestamp.tv_nsec); - return elapsed - @intCast(u32, earlier.timestamp.tv_nsec); + const seconds = @as(u64, @intCast(self.timestamp.tv_sec - earlier.timestamp.tv_sec)); + const elapsed = (seconds * ns_per_s) + @as(u32, @intCast(self.timestamp.tv_nsec)); + return elapsed - @as(u32, @intCast(earlier.timestamp.tv_nsec)); } }; diff --git a/lib/std/time/epoch.zig b/lib/std/time/epoch.zig index 279acc4298a8..f467721a492f 100644 --- a/lib/std/time/epoch.zig +++ b/lib/std/time/epoch.zig @@ -122,9 +122,9 @@ pub const YearAndDay = struct { if (days_left < days_in_month) break; days_left -= days_in_month; - month = @enumFromInt(Month, @intFromEnum(month) + 1); + month = @as(Month, @enumFromInt(@intFromEnum(month) + 1)); } - return .{ .month = month, .day_index = @intCast(u5, days_left) }; + return .{ .month = month, .day_index = @as(u5, @intCast(days_left)) }; } }; @@ -146,7 +146,7 @@ pub const EpochDay = struct { year_day -= year_size; year += 1; } - return .{ .year = year, .day = @intCast(u9, year_day) }; + return .{ .year = year, .day = @as(u9, @intCast(year_day)) }; } }; @@ -156,11 +156,11 @@ pub const DaySeconds = struct { /// the number of hours past the start of the day (0 to 23) pub fn getHoursIntoDay(self: DaySeconds) u5 { - return @intCast(u5, @divTrunc(self.secs, 3600)); + return @as(u5, @intCast(@divTrunc(self.secs, 3600))); } /// the number of minutes past the hour (0 to 59) pub fn getMinutesIntoHour(self: DaySeconds) u6 { - return 
@intCast(u6, @divTrunc(@mod(self.secs, 3600), 60)); + return @as(u6, @intCast(@divTrunc(@mod(self.secs, 3600), 60))); } /// the number of seconds past the start of the minute (0 to 59) pub fn getSecondsIntoMinute(self: DaySeconds) u6 { @@ -175,7 +175,7 @@ pub const EpochSeconds = struct { /// Returns the number of days since the epoch as an EpochDay. /// Use EpochDay to get information about the day of this time. pub fn getEpochDay(self: EpochSeconds) EpochDay { - return EpochDay{ .day = @intCast(u47, @divTrunc(self.secs, secs_per_day)) }; + return EpochDay{ .day = @as(u47, @intCast(@divTrunc(self.secs, secs_per_day))) }; } /// Returns the number of seconds into the day as DaySeconds. diff --git a/lib/std/tz.zig b/lib/std/tz.zig index 0cb9cefa5076..16288bd4ceb7 100644 --- a/lib/std/tz.zig +++ b/lib/std/tz.zig @@ -155,8 +155,8 @@ pub const Tz = struct { if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction leapseconds[i] = .{ - .occurrence = @intCast(i48, occur), - .correction = @intCast(i16, corr), + .occurrence = @as(i48, @intCast(occur)), + .correction = @as(i16, @intCast(corr)), }; } diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig index 1987d10b0d42..12cb74bd928d 100644 --- a/lib/std/unicode.zig +++ b/lib/std/unicode.zig @@ -45,22 +45,22 @@ pub fn utf8Encode(c: u21, out: []u8) !u3 { // - Increasing the initial shift by 6 each time // - Each time after the first shorten the shifted // value to a max of 0b111111 (63) - 1 => out[0] = @intCast(u8, c), // Can just do 0 + codepoint for initial range + 1 => out[0] = @as(u8, @intCast(c)), // Can just do 0 + codepoint for initial range 2 => { - out[0] = @intCast(u8, 0b11000000 | (c >> 6)); - out[1] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11000000 | (c >> 6))); + out[1] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 3 => { if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf; - out[0] = @intCast(u8, 0b11100000 | (c 
>> 12)); - out[1] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111)); - out[2] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11100000 | (c >> 12))); + out[1] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); + out[2] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 4 => { - out[0] = @intCast(u8, 0b11110000 | (c >> 18)); - out[1] = @intCast(u8, 0b10000000 | ((c >> 12) & 0b111111)); - out[2] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111)); - out[3] = @intCast(u8, 0b10000000 | (c & 0b111111)); + out[0] = @as(u8, @intCast(0b11110000 | (c >> 18))); + out[1] = @as(u8, @intCast(0b10000000 | ((c >> 12) & 0b111111))); + out[2] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); + out[3] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, else => unreachable, } @@ -695,11 +695,11 @@ pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u1 var it = view.iterator(); while (it.nextCodepoint()) |codepoint| { if (codepoint < 0x10000) { - const short = @intCast(u16, codepoint); + const short = @as(u16, @intCast(codepoint)); try result.append(mem.nativeToLittle(u16, short)); } else { - const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; var out: [2]u16 = undefined; out[0] = mem.nativeToLittle(u16, high); out[1] = mem.nativeToLittle(u16, low); @@ -720,12 +720,12 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize { const next_src_i = src_i + n; const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch return error.InvalidUtf8; if (codepoint < 0x10000) { - const short = @intCast(u16, codepoint); + const short = @as(u16, @intCast(codepoint)); utf16le[dest_i] = mem.nativeToLittle(u16, short); dest_i += 1; } else { - const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800; - const low = 
@intCast(u16, codepoint & 0x3FF) + 0xDC00; + const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; + const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; utf16le[dest_i] = mem.nativeToLittle(u16, high); utf16le[dest_i + 1] = mem.nativeToLittle(u16, low); dest_i += 2; diff --git a/lib/std/unicode/throughput_test.zig b/lib/std/unicode/throughput_test.zig index b828b4e43f4d..084406dc78a0 100644 --- a/lib/std/unicode/throughput_test.zig +++ b/lib/std/unicode/throughput_test.zig @@ -32,8 +32,8 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount { } const end = timer.read(); - const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s; - const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s; + const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s)); return ResultCount{ .count = r, .throughput = throughput }; } diff --git a/lib/std/valgrind.zig b/lib/std/valgrind.zig index ae4fde0da18b..61312e2338cf 100644 --- a/lib/std/valgrind.zig +++ b/lib/std/valgrind.zig @@ -94,7 +94,7 @@ pub fn IsTool(base: [2]u8, code: usize) bool { } fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { - return doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { diff --git a/lib/std/valgrind/callgrind.zig b/lib/std/valgrind/callgrind.zig index f3d8c7ae3c7c..3ba74fb525be 100644 --- a/lib/std/valgrind/callgrind.zig +++ b/lib/std/valgrind/callgrind.zig @@ -11,7 +11,7 @@ pub const CallgrindClientRequest = enum(usize) { }; fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, 
a3: usize, a4: usize, a5: usize) usize { - return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { diff --git a/lib/std/valgrind/memcheck.zig b/lib/std/valgrind/memcheck.zig index dd6c79cd90e3..7f5e973c4319 100644 --- a/lib/std/valgrind/memcheck.zig +++ b/lib/std/valgrind/memcheck.zig @@ -21,7 +21,7 @@ pub const MemCheckClientRequest = enum(usize) { }; fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { - return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5); + return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { @@ -31,24 +31,24 @@ fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: us /// Mark memory at qzz.ptr as unaddressable for qzz.len bytes. /// This returns -1 when run on Valgrind and 0 otherwise. pub fn makeMemNoAccess(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similarly, mark memory at qzz.ptr as addressable but undefined /// for qzz.len bytes. /// This returns -1 when run on Valgrind and 0 otherwise. 
pub fn makeMemUndefined(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similarly, mark memory at qzz.ptr as addressable and defined /// for qzz.len bytes. pub fn makeMemDefined(qzz: []u8) i1 { // This returns -1 when run on Valgrind and 0 otherwise. - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Similar to makeMemDefined except that addressability is @@ -56,8 +56,8 @@ pub fn makeMemDefined(qzz: []u8) i1 { /// but those which are not addressable are left unchanged. /// This returns -1 when run on Valgrind and 0 otherwise. pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 { - return @intCast(i1, doMemCheckClientRequestExpr(0, // default return - .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); + return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return + .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0))); } /// Create a block-description handle. The description is an ascii @@ -195,7 +195,7 @@ test "countLeakBlocks" { /// impossible to segfault your system by using this call. 
pub fn getVbits(zza: []u8, zzvbits: []u8) u2 { std.debug.assert(zzvbits.len >= zza.len / 8); - return @intCast(u2, doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)); + return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0))); } /// Set the validity data for addresses zza, copying it @@ -208,7 +208,7 @@ pub fn getVbits(zza: []u8, zzvbits: []u8) u2 { /// impossible to segfault your system by using this call. pub fn setVbits(zzvbits: []u8, zza: []u8) u2 { std.debug.assert(zzvbits.len >= zza.len / 8); - return @intCast(u2, doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)); + return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0))); } /// Disable and re-enable reporting of addressing errors in the diff --git a/lib/std/zig.zig b/lib/std/zig.zig index fe6d2ec120ad..63b620f67493 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -36,7 +36,7 @@ pub fn hashSrc(src: []const u8) SrcHash { } pub fn srcHashEql(a: SrcHash, b: SrcHash) bool { - return @bitCast(u128, a) == @bitCast(u128, b); + return @as(u128, @bitCast(a)) == @as(u128, @bitCast(b)); } pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash { diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 86e4e488203f..a82982e26234 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -62,7 +62,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A const token = tokenizer.next(); try tokens.append(gpa, .{ .tag = token.tag, - .start = @intCast(u32, token.loc.start), + .start = @as(u32, @intCast(token.loc.start)), }); if (token.tag == .eof) break; } @@ -123,7 +123,7 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void /// should point after the token in the error message. 
pub fn errorOffset(tree: Ast, parse_error: Error) u32 { return if (parse_error.token_is_prev) - @intCast(u32, tree.tokenSlice(parse_error.token).len) + @as(u32, @intCast(tree.tokenSlice(parse_error.token).len)) else 0; } @@ -772,7 +772,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { var n = node; var end_offset: TokenIndex = 0; while (true) switch (tags[n]) { - .root => return @intCast(TokenIndex, tree.tokens.len - 1), + .root => return @as(TokenIndex, @intCast(tree.tokens.len - 1)), .@"usingnamespace", .bool_not, @@ -1288,7 +1288,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { n = extra.else_expr; }, .@"for" => { - const extra = @bitCast(Node.For, datas[n].rhs); + const extra = @as(Node.For, @bitCast(datas[n].rhs)); n = tree.extra_data[datas[n].lhs + extra.inputs + @intFromBool(extra.has_else)]; }, .@"suspend" => { @@ -1955,7 +1955,7 @@ pub fn forSimple(tree: Ast, node: Node.Index) full.For { pub fn forFull(tree: Ast, node: Node.Index) full.For { const data = tree.nodes.items(.data)[node]; - const extra = @bitCast(Node.For, data.rhs); + const extra = @as(Node.For, @bitCast(data.rhs)); const inputs = tree.extra_data[data.lhs..][0..extra.inputs]; const then_expr = tree.extra_data[data.lhs + extra.inputs]; const else_expr = if (extra.has_else) tree.extra_data[data.lhs + extra.inputs + 1] else 0; diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig index 13219888b277..d42b02d931dc 100644 --- a/lib/std/zig/CrossTarget.zig +++ b/lib/std/zig/CrossTarget.zig @@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget { } const feature_name = cpu_features[start..index]; for (all_features, 0..) 
|feature, feat_index_usize| { - const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize); + const feat_index = @as(Target.Cpu.Feature.Set.Index, @intCast(feat_index_usize)); if (mem.eql(u8, feature_name, feature.name)) { set.addFeature(feat_index); break; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index fe3d97517f58..201c06d4d75f 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -94,7 +94,7 @@ pub fn getErrorMessageList(eb: ErrorBundle) ErrorMessageList { pub fn getMessages(eb: ErrorBundle) []const MessageIndex { const list = eb.getErrorMessageList(); - return @ptrCast([]const MessageIndex, eb.extra[list.start..][0..list.len]); + return @as([]const MessageIndex, @ptrCast(eb.extra[list.start..][0..list.len])); } pub fn getErrorMessage(eb: ErrorBundle, index: MessageIndex) ErrorMessage { @@ -109,7 +109,7 @@ pub fn getSourceLocation(eb: ErrorBundle, index: SourceLocationIndex) SourceLoca pub fn getNotes(eb: ErrorBundle, index: MessageIndex) []const MessageIndex { const notes_len = eb.getErrorMessage(index).notes_len; const start = @intFromEnum(index) + @typeInfo(ErrorMessage).Struct.fields.len; - return @ptrCast([]const MessageIndex, eb.extra[start..][0..notes_len]); + return @as([]const MessageIndex, @ptrCast(eb.extra[start..][0..notes_len])); } pub fn getCompileLogOutput(eb: ErrorBundle) [:0]const u8 { @@ -125,8 +125,8 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T, inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => eb.extra[i], - MessageIndex => @enumFromInt(MessageIndex, eb.extra[i]), - SourceLocationIndex => @enumFromInt(SourceLocationIndex, eb.extra[i]), + MessageIndex => @as(MessageIndex, @enumFromInt(eb.extra[i])), + SourceLocationIndex => @as(SourceLocationIndex, @enumFromInt(eb.extra[i])), else => @compileError("bad field type"), }; i += 1; @@ -202,7 +202,7 @@ fn renderErrorMessageToWriter( try 
counting_stderr.writeAll(": "); // This is the length of the part before the error message: // e.g. "file.zig:4:5: error: " - const prefix_len = @intCast(usize, counting_stderr.context.bytes_written); + const prefix_len = @as(usize, @intCast(counting_stderr.context.bytes_written)); try ttyconf.setColor(stderr, .reset); try ttyconf.setColor(stderr, .bold); if (err_msg.count == 1) { @@ -357,7 +357,7 @@ pub const Wip = struct { } const compile_log_str_index = if (compile_log_text.len == 0) 0 else str: { - const str = @intCast(u32, wip.string_bytes.items.len); + const str = @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.ensureUnusedCapacity(gpa, compile_log_text.len + 1); wip.string_bytes.appendSliceAssumeCapacity(compile_log_text); wip.string_bytes.appendAssumeCapacity(0); @@ -365,11 +365,11 @@ pub const Wip = struct { }; wip.setExtra(0, ErrorMessageList{ - .len = @intCast(u32, wip.root_list.items.len), - .start = @intCast(u32, wip.extra.items.len), + .len = @as(u32, @intCast(wip.root_list.items.len)), + .start = @as(u32, @intCast(wip.extra.items.len)), .compile_log_text = compile_log_str_index, }); - try wip.extra.appendSlice(gpa, @ptrCast([]const u32, wip.root_list.items)); + try wip.extra.appendSlice(gpa, @as([]const u32, @ptrCast(wip.root_list.items))); wip.root_list.clearAndFree(gpa); return .{ .string_bytes = try wip.string_bytes.toOwnedSlice(gpa), @@ -386,7 +386,7 @@ pub const Wip = struct { pub fn addString(wip: *Wip, s: []const u8) !u32 { const gpa = wip.gpa; - const index = @intCast(u32, wip.string_bytes.items.len); + const index = @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); wip.string_bytes.appendSliceAssumeCapacity(s); wip.string_bytes.appendAssumeCapacity(0); @@ -395,7 +395,7 @@ pub const Wip = struct { pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) !u32 { const gpa = wip.gpa; - const index = @intCast(u32, wip.string_bytes.items.len); + const index 
= @as(u32, @intCast(wip.string_bytes.items.len)); try wip.string_bytes.writer(gpa).print(fmt, args); try wip.string_bytes.append(gpa, 0); return index; @@ -407,15 +407,15 @@ pub const Wip = struct { } pub fn addErrorMessage(wip: *Wip, em: ErrorMessage) !MessageIndex { - return @enumFromInt(MessageIndex, try addExtra(wip, em)); + return @as(MessageIndex, @enumFromInt(try addExtra(wip, em))); } pub fn addErrorMessageAssumeCapacity(wip: *Wip, em: ErrorMessage) MessageIndex { - return @enumFromInt(MessageIndex, addExtraAssumeCapacity(wip, em)); + return @as(MessageIndex, @enumFromInt(addExtraAssumeCapacity(wip, em))); } pub fn addSourceLocation(wip: *Wip, sl: SourceLocation) !SourceLocationIndex { - return @enumFromInt(SourceLocationIndex, try addExtra(wip, sl)); + return @as(SourceLocationIndex, @enumFromInt(try addExtra(wip, sl))); } pub fn addReferenceTrace(wip: *Wip, rt: ReferenceTrace) !void { @@ -431,7 +431,7 @@ pub const Wip = struct { const other_list = other.getMessages(); // The ensureUnusedCapacity call above guarantees this. 
- const notes_start = wip.reserveNotes(@intCast(u32, other_list.len)) catch unreachable; + const notes_start = wip.reserveNotes(@as(u32, @intCast(other_list.len))) catch unreachable; for (notes_start.., other_list) |note, message| { wip.extra.items[note] = @intFromEnum(wip.addOtherMessage(other, message) catch unreachable); } @@ -441,7 +441,7 @@ pub const Wip = struct { try wip.extra.ensureUnusedCapacity(wip.gpa, notes_len + notes_len * @typeInfo(ErrorBundle.ErrorMessage).Struct.fields.len); wip.extra.items.len += notes_len; - return @intCast(u32, wip.extra.items.len - notes_len); + return @as(u32, @intCast(wip.extra.items.len - notes_len)); } fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex { @@ -493,7 +493,7 @@ pub const Wip = struct { fn addExtraAssumeCapacity(wip: *Wip, extra: anytype) u32 { const fields = @typeInfo(@TypeOf(extra)).Struct.fields; - const result = @intCast(u32, wip.extra.items.len); + const result = @as(u32, @intCast(wip.extra.items.len)); wip.extra.items.len += fields.len; setExtra(wip, result, extra); return result; diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index f3eec86acc31..14019571b138 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -36,20 +36,20 @@ const Members = struct { fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange { try p.extra_data.appendSlice(p.gpa, list); return Node.SubRange{ - .start = @intCast(Node.Index, p.extra_data.items.len - list.len), - .end = @intCast(Node.Index, p.extra_data.items.len), + .start = @as(Node.Index, @intCast(p.extra_data.items.len - list.len)), + .end = @as(Node.Index, @intCast(p.extra_data.items.len)), }; } fn addNode(p: *Parse, elem: Ast.Node) Allocator.Error!Node.Index { - const result = @intCast(Node.Index, p.nodes.len); + const result = @as(Node.Index, @intCast(p.nodes.len)); try p.nodes.append(p.gpa, elem); return result; } fn setNode(p: *Parse, i: usize, elem: Ast.Node) Node.Index { p.nodes.set(i, elem); - 
return @intCast(Node.Index, i); + return @as(Node.Index, @intCast(i)); } fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize { @@ -72,7 +72,7 @@ fn unreserveNode(p: *Parse, node_index: usize) void { fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index { const fields = std.meta.fields(@TypeOf(extra)); try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len); - const result = @intCast(u32, p.extra_data.items.len); + const result = @as(u32, @intCast(p.extra_data.items.len)); inline for (fields) |field| { comptime assert(field.type == Node.Index); p.extra_data.appendAssumeCapacity(@field(extra, field.name)); @@ -1202,10 +1202,10 @@ fn parseForStatement(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, @bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - }), + })), }, }); } @@ -1486,7 +1486,7 @@ fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index { while (true) { const tok_tag = p.token_tags[p.tok_i]; - const info = operTable[@intCast(usize, @intFromEnum(tok_tag))]; + const info = operTable[@as(usize, @intCast(@intFromEnum(tok_tag)))]; if (info.prec < min_prec) { break; } @@ -2087,10 +2087,10 @@ fn parseForExpr(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, @bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - }), + })), }, }); } @@ -2862,10 +2862,10 @@ fn parseForTypeExpr(p: *Parse) !Node.Index { .main_token = for_token, .data = .{ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start, - .rhs = @bitCast(u32, Node.For{ - .inputs = @intCast(u31, inputs), + .rhs = @as(u32, @bitCast(Node.For{ + .inputs = @as(u31, @intCast(inputs)), .has_else = has_else, - 
}), + })), }, }); } diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index f4f979f012da..468219f8f018 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -132,7 +132,7 @@ pub fn receiveMessage(s: *Server) !InMessage.Header { pub fn receiveBody_u32(s: *Server) !u32 { const fifo = &s.receive_fifo; const buf = fifo.readableSlice(0); - const result = @ptrCast(*align(1) const u32, buf[0..4]).*; + const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*; fifo.discard(4); return bswap(result); } @@ -140,7 +140,7 @@ pub fn receiveBody_u32(s: *Server) !u32 { pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void { return s.serveMessage(.{ .tag = tag, - .bytes_len = @intCast(u32, msg.len), + .bytes_len = @as(u32, @intCast(msg.len)), }, &.{msg}); } @@ -152,7 +152,7 @@ pub fn serveMessage( var iovecs: [10]std.os.iovec_const = undefined; const header_le = bswap(header); iovecs[0] = .{ - .iov_base = @ptrCast([*]const u8, &header_le), + .iov_base = @as([*]const u8, @ptrCast(&header_le)), .iov_len = @sizeOf(OutMessage.Header), }; for (bufs, iovecs[1 .. 
bufs.len + 1]) |buf, *iovec| { @@ -171,7 +171,7 @@ pub fn serveEmitBinPath( ) !void { try s.serveMessage(.{ .tag = .emit_bin_path, - .bytes_len = @intCast(u32, fs_path.len + @sizeOf(OutMessage.EmitBinPath)), + .bytes_len = @as(u32, @intCast(fs_path.len + @sizeOf(OutMessage.EmitBinPath))), }, &.{ std.mem.asBytes(&header), fs_path, @@ -185,7 +185,7 @@ pub fn serveTestResults( const msg_le = bswap(msg); try s.serveMessage(.{ .tag = .test_results, - .bytes_len = @intCast(u32, @sizeOf(OutMessage.TestResults)), + .bytes_len = @as(u32, @intCast(@sizeOf(OutMessage.TestResults))), }, &.{ std.mem.asBytes(&msg_le), }); @@ -193,14 +193,14 @@ pub fn serveTestResults( pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void { const eb_hdr: OutMessage.ErrorBundle = .{ - .extra_len = @intCast(u32, error_bundle.extra.len), - .string_bytes_len = @intCast(u32, error_bundle.string_bytes.len), + .extra_len = @as(u32, @intCast(error_bundle.extra.len)), + .string_bytes_len = @as(u32, @intCast(error_bundle.string_bytes.len)), }; const bytes_len = @sizeOf(OutMessage.ErrorBundle) + 4 * error_bundle.extra.len + error_bundle.string_bytes.len; try s.serveMessage(.{ .tag = .error_bundle, - .bytes_len = @intCast(u32, bytes_len), + .bytes_len = @as(u32, @intCast(bytes_len)), }, &.{ std.mem.asBytes(&eb_hdr), // TODO: implement @ptrCast between slices changing the length @@ -218,8 +218,8 @@ pub const TestMetadata = struct { pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void { const header: OutMessage.TestMetadata = .{ - .tests_len = bswap(@intCast(u32, test_metadata.names.len)), - .string_bytes_len = bswap(@intCast(u32, test_metadata.string_bytes.len)), + .tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))), + .string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))), }; const bytes_len = @sizeOf(OutMessage.TestMetadata) + 3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len; @@ -237,7 +237,7 @@ pub fn 
serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void { return s.serveMessage(.{ .tag = .test_metadata, - .bytes_len = @intCast(u32, bytes_len), + .bytes_len = @as(u32, @intCast(bytes_len)), }, &.{ std.mem.asBytes(&header), // TODO: implement @ptrCast between slices changing the length @@ -253,7 +253,7 @@ fn bswap(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { - .Enum => return @enumFromInt(T, @byteSwap(@intFromEnum(x))), + .Enum => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))), .Int => return @byteSwap(x), .Struct => |info| switch (info.layout) { .Extern => { @@ -265,7 +265,7 @@ fn bswap(x: anytype) @TypeOf(x) { }, .Packed => { const I = info.backing_integer.?; - return @bitCast(T, @byteSwap(@bitCast(I, x))); + return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x))))); }, .Auto => @compileError("auto layout struct"), }, @@ -286,7 +286,7 @@ fn bswap_and_workaround_u32(bytes_ptr: *const [4]u8) u32 { /// workaround for https://github.com/ziglang/zig/issues/14904 fn bswap_and_workaround_tag(bytes_ptr: *const [4]u8) InMessage.Tag { const int = std.mem.readIntLittle(u32, bytes_ptr); - return @enumFromInt(InMessage.Tag, int); + return @as(InMessage.Tag, @enumFromInt(int)); } const OutMessage = std.zig.Server.Message; diff --git a/lib/std/zig/c_builtins.zig b/lib/std/zig/c_builtins.zig index de9ac9560027..7f0414c96f2e 100644 --- a/lib/std/zig/c_builtins.zig +++ b/lib/std/zig/c_builtins.zig @@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int { pub inline fn __builtin_popcount(val: c_uint) c_int { // popcount of a c_uint will never exceed the capacity of a c_int @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @popCount(val))); + return @as(c_int, @bitCast(@as(c_uint, @popCount(val)))); } pub inline fn __builtin_ctz(val: c_uint) c_int { // Returns the number of trailing 0-bits in val, starting at the least significant bit position. 
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @ctz(val))); + return @as(c_int, @bitCast(@as(c_uint, @ctz(val)))); } pub inline fn __builtin_clz(val: c_uint) c_int { // Returns the number of leading 0-bits in x, starting at the most significant bit position. // In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint @setRuntimeSafety(false); - return @bitCast(c_int, @as(c_uint, @clz(val))); + return @as(c_int, @bitCast(@as(c_uint, @clz(val)))); } pub inline fn __builtin_sqrt(val: f64) f64 { @@ -135,7 +135,7 @@ pub inline fn __builtin_object_size(ptr: ?*const anyopaque, ty: c_int) usize { // If it is not possible to determine which objects ptr points to at compile time, // __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0 // for type 2 or 3. - if (ty == 0 or ty == 1) return @bitCast(usize, -@as(isize, 1)); + if (ty == 0 or ty == 1) return @as(usize, @bitCast(-@as(isize, 1))); if (ty == 2 or ty == 3) return 0; unreachable; } @@ -151,8 +151,8 @@ pub inline fn __builtin___memset_chk( } pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque { - const dst_cast = @ptrCast([*c]u8, dst); - @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val))); + const dst_cast = @as([*c]u8, @ptrCast(dst)); + @memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val))))); return dst; } @@ -172,8 +172,8 @@ pub inline fn __builtin_memcpy( len: usize, ) ?*anyopaque { if (len > 0) @memcpy( - @ptrCast([*]u8, dst.?)[0..len], - @ptrCast([*]const u8, src.?), + @as([*]u8, @ptrCast(dst.?))[0..len], + @as([*]const u8, @ptrCast(src.?)), ); return dst; } @@ -202,8 +202,8 @@ pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long { /// If tagp is empty, the function returns a NaN whose significand is zero. 
pub inline fn __builtin_nanf(tagp: []const u8) f32 { const parsed = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0; - const bits = @truncate(u23, parsed); // single-precision float trailing significand is 23 bits - return @bitCast(f32, @as(u32, bits) | std.math.qnan_u32); + const bits = @as(u23, @truncate(parsed)); // single-precision float trailing significand is 23 bits + return @as(f32, @bitCast(@as(u32, bits) | std.math.qnan_u32)); } pub inline fn __builtin_huge_valf() f32 { diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig index dafef5e63b1e..2e7bb61df6b5 100644 --- a/lib/std/zig/c_translation.zig +++ b/lib/std/zig/c_translation.zig @@ -42,9 +42,9 @@ pub fn cast(comptime DestType: type, target: anytype) DestType { }, .Float => { switch (@typeInfo(SourceType)) { - .Int => return @floatFromInt(DestType, target), - .Float => return @floatCast(DestType, target), - .Bool => return @floatFromInt(DestType, @intFromBool(target)), + .Int => return @as(DestType, @floatFromInt(target)), + .Float => return @as(DestType, @floatCast(target)), + .Bool => return @as(DestType, @floatFromInt(@intFromBool(target))), else => {}, } }, @@ -65,36 +65,25 @@ fn castInt(comptime DestType: type, target: anytype) DestType { const source = @typeInfo(@TypeOf(target)).Int; if (dest.bits < source.bits) - return @bitCast(DestType, @truncate(std.meta.Int(source.signedness, dest.bits), target)) + return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), @truncate(target)))) else - return @bitCast(DestType, @as(std.meta.Int(source.signedness, dest.bits), target)); + return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), target))); } fn castPtr(comptime DestType: type, target: anytype) DestType { - const dest = ptrInfo(DestType); - const source = ptrInfo(@TypeOf(target)); - - if (source.is_const and !dest.is_const) - return @constCast(target) - else if (source.is_volatile and !dest.is_volatile) - return @volatileCast(target) - 
else if (@typeInfo(dest.child) == .Opaque) - // dest.alignment would error out - return @ptrCast(DestType, target) - else - return @ptrCast(DestType, @alignCast(dest.alignment, target)); + return @constCast(@volatileCast(@alignCast(@ptrCast(target)))); } fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType { switch (@typeInfo(SourceType)) { .Int => { - return @ptrFromInt(DestType, castInt(usize, target)); + return @as(DestType, @ptrFromInt(castInt(usize, target))); }, .ComptimeInt => { if (target < 0) - return @ptrFromInt(DestType, @bitCast(usize, @intCast(isize, target))) + return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target)))))) else - return @ptrFromInt(DestType, @intCast(usize, target)); + return @as(DestType, @ptrFromInt(@as(usize, @intCast(target)))); }, .Pointer => { return castPtr(DestType, target); @@ -120,34 +109,34 @@ fn ptrInfo(comptime PtrType: type) std.builtin.Type.Pointer { test "cast" { var i = @as(i64, 10); - try testing.expect(cast(*u8, 16) == @ptrFromInt(*u8, 16)); + try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16))); try testing.expect(cast(*u64, &i).* == @as(u64, 10)); try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i); - try testing.expect(cast(?*u8, 2) == @ptrFromInt(*u8, 2)); + try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2))); try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i); try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i); - try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(*u32, 4))); - try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(?*u32, 4))); + try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4)))); + try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4)))); try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10))); - try testing.expectEqual(@bitCast(i32, @as(u32, 0x8000_0000)), cast(i32, @as(u32, 0x8000_0000))); + try 
testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000))); - try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*const u8, 2))); - try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*volatile u8, 2))); + try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2)))); + try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2)))); - try testing.expectEqual(@ptrFromInt(?*anyopaque, 2), cast(?*anyopaque, @ptrFromInt(*u8, 2))); + try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2)))); var foo: c_int = -1; - try testing.expect(cast(*anyopaque, -1) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(*anyopaque, foo) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(?*anyopaque, -1) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1)))); - try testing.expect(cast(?*anyopaque, foo) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1)))); + try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); + try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); const FnPtr = ?*align(1) const fn (*anyopaque) void; - try testing.expect(cast(FnPtr, 0) == @ptrFromInt(FnPtr, @as(usize, 0))); - try testing.expect(cast(FnPtr, foo) == @ptrFromInt(FnPtr, @bitCast(usize, @as(isize, -1)))); + try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0)))); + try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))))); } /// Given a 
value returns its size as C's sizeof operator would. @@ -192,7 +181,7 @@ pub fn sizeof(target: anytype) usize { const array_info = @typeInfo(ptr.child).Array; if ((array_info.child == u8 or array_info.child == u16) and array_info.sentinel != null and - @ptrCast(*align(1) const array_info.child, array_info.sentinel.?).* == 0) + @as(*align(1) const array_info.child, @ptrCast(array_info.sentinel.?)).* == 0) { // length of the string plus one for the null terminator. return (array_info.len + 1) * @sizeOf(array_info.child); @@ -325,10 +314,10 @@ test "promoteIntLiteral" { pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 { if (this_index <= 0) return 0; - const positive_index = @intCast(usize, this_index); - if (positive_index < source_vector_len) return @intCast(i32, this_index); + const positive_index = @as(usize, @intCast(this_index)); + if (positive_index < source_vector_len) return @as(i32, @intCast(this_index)); const b_index = positive_index - source_vector_len; - return ~@intCast(i32, b_index); + return ~@as(i32, @intCast(b_index)); } test "shuffleVectorIndex" { diff --git a/lib/std/zig/number_literal.zig b/lib/std/zig/number_literal.zig index 66596b3b15d1..aba588a3ea8b 100644 --- a/lib/std/zig/number_literal.zig +++ b/lib/std/zig/number_literal.zig @@ -141,7 +141,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { 'a'...'z' => c - 'a' + 10, else => return .{ .failure = .{ .invalid_character = i } }, }; - if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @enumFromInt(Base, base) } } }; + if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @as(Base, @enumFromInt(base)) } } }; if (exponent and digit >= 10) return .{ .failure = .{ .invalid_digit_exponent = i } }; underscore = false; special = 0; @@ -159,7 +159,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { if (underscore) return .{ .failure = .{ .trailing_underscore = bytes.len - 1 } }; if (special 
!= 0) return .{ .failure = .{ .trailing_special = bytes.len - 1 } }; - if (float) return .{ .float = @enumFromInt(FloatBase, base) }; - if (overflow) return .{ .big_int = @enumFromInt(Base, base) }; + if (float) return .{ .float = @as(FloatBase, @enumFromInt(base)) }; + if (overflow) return .{ .big_int = @as(Base, @enumFromInt(base)) }; return .{ .int = x }; } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index e41e9157e6c0..ca3e99b164d1 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -166,10 +166,10 @@ test "zig fmt: respect line breaks after var declarations" { \\ lookup_tables[1][p[6]] ^ \\ lookup_tables[2][p[5]] ^ \\ lookup_tables[3][p[4]] ^ - \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - \\ lookup_tables[7][@truncate(u8, self.crc >> 0)]; + \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^ + \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^ + \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^ + \\ lookup_tables[7][@as(u8, self.crc >> 0)]; \\ ); } @@ -1108,7 +1108,7 @@ test "zig fmt: async function" { \\ handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.Async) void, \\}; \\test "hi" { - \\ var ptr = @ptrCast(fn (i32) callconv(.Async) void, other); + \\ var ptr: fn (i32) callconv(.Async) void = @ptrCast(other); \\} \\ ); @@ -1825,10 +1825,10 @@ test "zig fmt: respect line breaks after infix operators" { \\ lookup_tables[1][p[6]] ^ \\ lookup_tables[2][p[5]] ^ \\ lookup_tables[3][p[4]] ^ - \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^ - \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^ - \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^ - \\ lookup_tables[7][@truncate(u8, self.crc >> 0)]; + \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^ + \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^ + \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^ + \\ lookup_tables[7][@as(u8, self.crc >> 
0)]; \\} \\ ); @@ -4814,7 +4814,7 @@ test "zig fmt: use of comments and multiline string literals may force the param \\ \\ unknown-length pointers and C pointers cannot be hashed deeply. \\ \\ Consider providing your own hash function. \\ ); - \\ return @intCast(i1, doMemCheckClientRequestExpr(0, // default return + \\ return @intCast(doMemCheckClientRequestExpr(0, // default return \\ .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)); \\} \\ diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig index df6097851037..a53dee7fa8ae 100644 --- a/lib/std/zig/perf_test.zig +++ b/lib/std/zig/perf_test.zig @@ -18,9 +18,9 @@ pub fn main() !void { } const end = timer.read(); memory_used /= iterations; - const elapsed_s = @floatFromInt(f64, end - start) / std.time.ns_per_s; - const bytes_per_sec_float = @floatFromInt(f64, source.len * iterations) / elapsed_s; - const bytes_per_sec = @intFromFloat(u64, @floor(bytes_per_sec_float)); + const elapsed_s = @as(f64, @floatFromInt(end - start)) / std.time.ns_per_s; + const bytes_per_sec_float = @as(f64, @floatFromInt(source.len * iterations)) / elapsed_s; + const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float))); var stdout_file = std.io.getStdOut(); const stdout = stdout_file.writer(); diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 72f54b3f4f1f..2cf7bc97165c 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -2719,7 +2719,7 @@ fn renderIdentifier(ais: *Ais, tree: Ast, token_index: Ast.TokenIndex, space: Sp while (contents_i < contents.len and buf_i < longest_keyword_or_primitive_len) { if (contents[contents_i] == '\\') { const res = std.zig.string_literal.parseEscapeSequence(contents, &contents_i).success; - buf[buf_i] = @intCast(u8, res); + buf[buf_i] = @as(u8, @intCast(res)); buf_i += 1; } else { buf[buf_i] = contents[contents_i]; @@ -2773,7 +2773,7 @@ fn renderIdentifierContents(writer: anytype, bytes: []const u8) !void { switch (res) { .success => 
|codepoint| { if (codepoint <= 0x7f) { - const buf = [1]u8{@intCast(u8, codepoint)}; + const buf = [1]u8{@as(u8, @intCast(codepoint))}; try std.fmt.format(writer, "{}", .{std.zig.fmtEscapes(&buf)}); } else { try writer.writeAll(escape_sequence); diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig index 4859c379a081..53b1ab7ca83b 100644 --- a/lib/std/zig/string_literal.zig +++ b/lib/std/zig/string_literal.zig @@ -142,7 +142,7 @@ pub fn parseEscapeSequence(slice: []const u8, offset: *usize) ParsedCharLiteral return .{ .failure = .{ .expected_rbrace = i } }; } offset.* = i; - return .{ .success = @intCast(u21, value) }; + return .{ .success = @as(u21, @intCast(value)) }; }, else => return .{ .failure = .{ .invalid_escape_character = offset.* - 1 } }, } @@ -253,7 +253,7 @@ pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result }; try writer.writeAll(buf[0..len]); } else { - try writer.writeByte(@intCast(u8, codepoint)); + try writer.writeByte(@as(u8, @intCast(codepoint))); } }, .failure => |err| return Result{ .failure = err }, diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index 29ef752d7a0a..f69f1e1b1ee4 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -479,8 +479,8 @@ fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion { fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined; _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len); - const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf); - const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf); + const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf)); + const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => 
.Little, @@ -503,8 +503,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { if (sh_buf.len < shentsize) return error.InvalidElfFile; _ = try preadMin(file, &sh_buf, str_section_off, shentsize); - const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf)); - const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf)); + const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf)); + const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf)); const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; @@ -529,14 +529,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { shoff += shentsize; sh_buf_i += shentsize; }) { - const sh32 = @ptrCast( - *elf.Elf32_Shdr, - @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]), - ); - const sh64 = @ptrCast( - *elf.Elf64_Shdr, - @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]), - ); + const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); + const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name); const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0); if (mem.eql(u8, sh_name, ".dynstr")) { @@ -558,7 +552,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion { var buf: [80000]u8 = undefined; if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion; - const dynstr_size = @intCast(usize, dynstr.size); + const dynstr_size = @as(usize, @intCast(dynstr.size)); const dynstr_bytes = buf[0..dynstr_size]; _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len); var it = mem.splitScalar(u8, dynstr_bytes, 0); @@ -621,8 +615,8 @@ pub fn abiAndDynamicLinkerFromFile( ) AbiAndDynamicLinkerFromFileError!NativeTargetInfo { var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 
align(@alignOf(elf.Elf64_Ehdr)) = undefined; _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len); - const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf); - const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf); + const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf)); + const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => .Little, @@ -668,21 +662,21 @@ pub fn abiAndDynamicLinkerFromFile( phoff += phentsize; ph_buf_i += phentsize; }) { - const ph32 = @ptrCast(*elf.Elf32_Phdr, @alignCast(@alignOf(elf.Elf32_Phdr), &ph_buf[ph_buf_i])); - const ph64 = @ptrCast(*elf.Elf64_Phdr, @alignCast(@alignOf(elf.Elf64_Phdr), &ph_buf[ph_buf_i])); + const ph32: *elf.Elf32_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i])); + const ph64: *elf.Elf64_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i])); const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type); switch (p_type) { elf.PT_INTERP => if (look_for_ld) { const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset); const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz); if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong; - const filesz = @intCast(usize, p_filesz); + const filesz = @as(usize, @intCast(p_filesz)); _ = try preadMin(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz); // PT_INTERP includes a null byte in filesz. const len = filesz - 1; // dynamic_linker.max_byte is "max", not "len". // We know it will fit in u8 because we check against dynamic_linker.buffer.len above. - result.dynamic_linker.max_byte = @intCast(u8, len - 1); + result.dynamic_linker.max_byte = @as(u8, @intCast(len - 1)); // Use it to determine ABI. 
const full_ld_path = result.dynamic_linker.buffer[0..len]; @@ -720,14 +714,8 @@ pub fn abiAndDynamicLinkerFromFile( dyn_off += dyn_size; dyn_buf_i += dyn_size; }) { - const dyn32 = @ptrCast( - *elf.Elf32_Dyn, - @alignCast(@alignOf(elf.Elf32_Dyn), &dyn_buf[dyn_buf_i]), - ); - const dyn64 = @ptrCast( - *elf.Elf64_Dyn, - @alignCast(@alignOf(elf.Elf64_Dyn), &dyn_buf[dyn_buf_i]), - ); + const dyn32: *elf.Elf32_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i])); + const dyn64: *elf.Elf64_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i])); const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag); const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val); if (tag == elf.DT_RUNPATH) { @@ -755,8 +743,8 @@ pub fn abiAndDynamicLinkerFromFile( if (sh_buf.len < shentsize) return error.InvalidElfFile; _ = try preadMin(file, &sh_buf, str_section_off, shentsize); - const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf)); - const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf)); + const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf)); + const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf)); const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; @@ -782,14 +770,8 @@ pub fn abiAndDynamicLinkerFromFile( shoff += shentsize; sh_buf_i += shentsize; }) { - const sh32 = @ptrCast( - *elf.Elf32_Shdr, - @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]), - ); - const sh64 = @ptrCast( - *elf.Elf64_Shdr, - @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]), - ); + const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); + const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i])); const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name); const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0); if 
(mem.eql(u8, sh_name, ".dynstr")) { diff --git a/lib/std/zig/system/arm.zig b/lib/std/zig/system/arm.zig index da05c8c90d09..7d466fc98438 100644 --- a/lib/std/zig/system/arm.zig +++ b/lib/std/zig/system/arm.zig @@ -141,7 +141,7 @@ pub const aarch64 = struct { } inline fn bitField(input: u64, offset: u6) u4 { - return @truncate(u4, input >> offset); + return @as(u4, @truncate(input >> offset)); } /// Input array should consist of readouts from 12 system registers such that: @@ -176,23 +176,23 @@ pub const aarch64 = struct { /// Takes readout of MIDR_EL1 register as input. fn detectNativeCoreInfo(midr: u64) CoreInfo { var info = CoreInfo{ - .implementer = @truncate(u8, midr >> 24), - .part = @truncate(u12, midr >> 4), + .implementer = @as(u8, @truncate(midr >> 24)), + .part = @as(u12, @truncate(midr >> 4)), }; blk: { if (info.implementer == 0x41) { // ARM Ltd. - const special_bits = @truncate(u4, info.part >> 8); + const special_bits = @as(u4, @truncate(info.part >> 8)); if (special_bits == 0x0 or special_bits == 0x7) { // TODO Variant and arch encoded differently. 
break :blk; } } - info.variant |= @intCast(u8, @truncate(u4, midr >> 20)) << 4; - info.variant |= @truncate(u4, midr); - info.architecture = @truncate(u4, midr >> 16); + info.variant |= @as(u8, @intCast(@as(u4, @truncate(midr >> 20)))) << 4; + info.variant |= @as(u4, @truncate(midr)); + info.architecture = @as(u4, @truncate(midr >> 16)); } return info; diff --git a/lib/std/zig/system/windows.zig b/lib/std/zig/system/windows.zig index c5c6f052ec6a..9c5b614c39dd 100644 --- a/lib/std/zig/system/windows.zig +++ b/lib/std/zig/system/windows.zig @@ -26,8 +26,8 @@ pub fn detectRuntimeVersion() WindowsVersion { // `---` `` ``--> Sub-version (Starting from Windows 10 onwards) // \ `--> Service pack (Always zero in the constants defined) // `--> OS version (Major & minor) - const os_ver: u16 = @intCast(u16, version_info.dwMajorVersion & 0xff) << 8 | - @intCast(u16, version_info.dwMinorVersion & 0xff); + const os_ver: u16 = @as(u16, @intCast(version_info.dwMajorVersion & 0xff)) << 8 | + @as(u16, @intCast(version_info.dwMinorVersion & 0xff)); const sp_ver: u8 = 0; const sub_ver: u8 = if (os_ver >= 0x0A00) subver: { // There's no other way to obtain this info beside @@ -38,12 +38,12 @@ pub fn detectRuntimeVersion() WindowsVersion { if (version_info.dwBuildNumber >= build) last_idx = i; } - break :subver @truncate(u8, last_idx); + break :subver @as(u8, @truncate(last_idx)); } else 0; const version: u32 = @as(u32, os_ver) << 16 | @as(u16, sp_ver) << 8 | sub_ver; - return @enumFromInt(WindowsVersion, version); + return @as(WindowsVersion, @enumFromInt(version)); } // Technically, a registry value can be as long as 1MB. 
However, MS recommends storing @@ -100,11 +100,11 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.MULTI_SZ, => { comptime assert(@sizeOf(std.os.windows.UNICODE_STRING) % 2 == 0); - const unicode = @ptrCast(*std.os.windows.UNICODE_STRING, &tmp_bufs[i]); + const unicode = @as(*std.os.windows.UNICODE_STRING, @ptrCast(&tmp_bufs[i])); unicode.* = .{ .Length = 0, .MaximumLength = max_value_len - @sizeOf(std.os.windows.UNICODE_STRING), - .Buffer = @ptrCast([*]u16, tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..]), + .Buffer = @as([*]u16, @ptrCast(tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..])), }; break :blk unicode; }, @@ -159,7 +159,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.MULTI_SZ, => { var buf = @field(args, field.name).value_buf; - const entry = @ptrCast(*align(1) const std.os.windows.UNICODE_STRING, table[i + 1].EntryContext); + const entry = @as(*align(1) const std.os.windows.UNICODE_STRING, @ptrCast(table[i + 1].EntryContext)); const len = try std.unicode.utf16leToUtf8(buf, entry.Buffer[0 .. 
entry.Length / 2]); buf[len] = 0; }, @@ -168,7 +168,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void { REG.DWORD_BIG_ENDIAN, REG.QWORD, => { - const entry = @ptrCast([*]align(1) const u8, table[i + 1].EntryContext); + const entry = @as([*]align(1) const u8, @ptrCast(table[i + 1].EntryContext)); switch (@field(args, field.name).value_type) { REG.DWORD, REG.DWORD_BIG_ENDIAN => { @memcpy(@field(args, field.name).value_buf[0..4], entry[0..4]); @@ -254,18 +254,18 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu { // CP 4039 -> ID_AA64MMFR1_EL1 // CP 403A -> ID_AA64MMFR2_EL1 getCpuInfoFromRegistry(i, .{ - .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[0]) }, - .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[1]) }, - .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[2]) }, - .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[3]) }, - .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[4]) }, - .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[5]) }, - .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[6]) }, - .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[7]) }, - .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[8]) }, - .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[9]) }, - .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[10]) }, - .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, ®isters[11]) }, + .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[0])) }, + .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[1])) }, + .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = 
@as(*[8]u8, @ptrCast(®isters[2])) }, + .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[3])) }, + .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[4])) }, + .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[5])) }, + .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[6])) }, + .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[7])) }, + .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[8])) }, + .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[9])) }, + .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[10])) }, + .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(®isters[11])) }, }) catch break :blk null; cores[i] = @import("arm.zig").aarch64.detectNativeCpuAndFeatures(current_arch, registers) orelse diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 0d6a6d4fd807..72f65afb3a6c 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -1290,7 +1290,7 @@ pub const Tokenizer = struct { // check utf8-encoded character. const length = std.unicode.utf8ByteSequenceLength(c0) catch return 1; if (self.index + length > self.buffer.len) { - return @intCast(u3, self.buffer.len - self.index); + return @as(u3, @intCast(self.buffer.len - self.index)); } const bytes = self.buffer[self.index .. 
self.index + length]; switch (length) { diff --git a/lib/test_runner.zig b/lib/test_runner.zig index 8bc79a96c8b6..842babcdeb5f 100644 --- a/lib/test_runner.zig +++ b/lib/test_runner.zig @@ -70,12 +70,12 @@ fn mainServer() !void { defer std.testing.allocator.free(expected_panic_msgs); for (test_fns, names, async_frame_sizes, expected_panic_msgs) |test_fn, *name, *async_frame_size, *expected_panic_msg| { - name.* = @intCast(u32, string_bytes.items.len); + name.* = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.ensureUnusedCapacity(std.testing.allocator, test_fn.name.len + 1); string_bytes.appendSliceAssumeCapacity(test_fn.name); string_bytes.appendAssumeCapacity(0); - async_frame_size.* = @intCast(u32, test_fn.async_frame_size orelse 0); + async_frame_size.* = @as(u32, @intCast(test_fn.async_frame_size orelse 0)); expected_panic_msg.* = 0; } @@ -163,7 +163,7 @@ fn mainTerminal() void { std.heap.page_allocator.free(async_frame_buffer); async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory"); } - const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func); + const casted_fn = @as(fn () callconv(.Async) anyerror!void, @ptrCast(test_fn.func)); break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{}); }, .blocking => { diff --git a/src/Air.zig b/src/Air.zig index ec2baf0dabfb..f7762a5e86d1 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1106,7 +1106,7 @@ pub const VectorCmp = struct { op: u32, pub fn compareOperator(self: VectorCmp) std.math.CompareOperator { - return @enumFromInt(std.math.CompareOperator, @truncate(u3, self.op)); + return @as(std.math.CompareOperator, @enumFromInt(@as(u3, @truncate(self.op)))); } pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 { @@ -1151,11 +1151,11 @@ pub const Cmpxchg = struct { flags: u32, pub fn successOrder(self: Cmpxchg) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, 
self.flags)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags)))); } pub fn failureOrder(self: Cmpxchg) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags >> 3)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags >> 3)))); } }; @@ -1166,11 +1166,11 @@ pub const AtomicRmw = struct { flags: u32, pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder { - return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags)); + return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags)))); } pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp { - return @enumFromInt(std.builtin.AtomicRmwOp, @truncate(u4, self.flags >> 3)); + return @as(std.builtin.AtomicRmwOp, @enumFromInt(@as(u4, @truncate(self.flags >> 3)))); } }; @@ -1451,7 +1451,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @intFromEnum(ref); if (ref_int < ref_start_index) { - const ip_index = @enumFromInt(InternPool.Index, ref_int); + const ip_index = @as(InternPool.Index, @enumFromInt(ref_int)); return ip_index.toType(); } const inst_index = ref_int - ref_start_index; @@ -1472,9 +1472,9 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => air.extra[i], - Inst.Ref => @enumFromInt(Inst.Ref, air.extra[i]), - i32 => @bitCast(i32, air.extra[i]), - InternPool.Index => @enumFromInt(InternPool.Index, air.extra[i]), + Inst.Ref => @as(Inst.Ref, @enumFromInt(air.extra[i])), + i32 => @as(i32, @bitCast(air.extra[i])), + InternPool.Index => @as(InternPool.Index, @enumFromInt(air.extra[i])), else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; @@ -1494,7 +1494,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { pub const ref_start_index: 
u32 = InternPool.static_len; pub fn indexToRef(inst: Inst.Index) Inst.Ref { - return @enumFromInt(Inst.Ref, ref_start_index + inst); + return @as(Inst.Ref, @enumFromInt(ref_start_index + inst)); } pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { @@ -1516,10 +1516,10 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @intFromEnum(inst); if (ref_int < ref_start_index) { - const ip_index = @enumFromInt(InternPool.Index, ref_int); + const ip_index = @as(InternPool.Index, @enumFromInt(ref_int)); return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); + const inst_index = @as(Air.Inst.Index, @intCast(ref_int - ref_start_index)); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .interned => return air_datas[inst_index].interned.toValue(), @@ -1747,7 +1747,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .work_group_id, => false, - .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, + .assembly => @as(u1, @truncate(air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31)) != 0, .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), diff --git a/src/AstGen.zig b/src/AstGen.zig index df64d5854910..c7ac569246c6 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -70,7 +70,7 @@ fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 { fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, astgen.extra.items.len); + const result = @as(u32, @intCast(astgen.extra.items.len)); astgen.extra.items.len += fields.len; setExtra(astgen, result, extra); return 
result; @@ -83,11 +83,11 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { astgen.extra.items[i] = switch (field.type) { u32 => @field(extra, field.name), Zir.Inst.Ref => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)), - Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.Call.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.BuiltinCall.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.SwitchBlock.Bits => @as(u32, @bitCast(@field(extra, field.name))), + Zir.Inst.FuncFancy.Bits => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }; i += 1; @@ -95,18 +95,18 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { } fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 { - const result = @intCast(u32, astgen.extra.items.len); + const result = @as(u32, @intCast(astgen.extra.items.len)); try astgen.extra.resize(astgen.gpa, result + size); return result; } fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void { - const coerced = @ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); return astgen.extra.appendSlice(astgen.gpa, coerced); } fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void { - const coerced = @ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); astgen.extra.appendSliceAssumeCapacity(coerced); } @@ -176,7 +176,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); astgen.extra.items[err_index] = 
astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ - .items_len = @intCast(u32, astgen.compile_errors.items.len), + .items_len = @as(u32, @intCast(astgen.compile_errors.items.len)), }); for (astgen.compile_errors.items) |item| { @@ -192,7 +192,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ - .imports_len = @intCast(u32, astgen.imports.count()), + .imports_len = @as(u32, @intCast(astgen.imports.count())), }); var it = astgen.imports.iterator(); @@ -1334,7 +1334,7 @@ fn fnProtoExpr( var param_gz = block_scope.makeSubBlock(scope); defer param_gz.unstack(); const param_type = try expr(¶m_gz, scope, coerced_type_ri, param_type_node); - const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1)); _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); const main_tokens = tree.nodes.items(.main_token); const name_token = param.name_token orelse main_tokens[param_type_node]; @@ -1468,7 +1468,7 @@ fn arrayInitExpr( const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr); _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{ .ty = array_type_inst, - .init_count = @intCast(u32, array_init.ast.elements.len), + .init_count = @as(u32, @intCast(array_init.ast.elements.len)), }); break :inst .{ .array = array_type_inst, @@ -1533,7 +1533,7 @@ fn arrayInitExprRlNone( const astgen = gz.astgen; const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(u32, elements.len), + .operands_len = @as(u32, @intCast(elements.len)), }); var extra_index = try reserveExtra(astgen, elements.len); @@ -1558,7 +1558,7 @@ fn arrayInitExprInner( const len = elements.len + @intFromBool(array_ty_inst != .none); const 
payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(u32, len), + .operands_len = @as(u32, @intCast(len)), }); var extra_index = try reserveExtra(astgen, len); if (array_ty_inst != .none) { @@ -1574,7 +1574,7 @@ fn arrayInitExprInner( .tag = .elem_type_index, .data = .{ .bin = .{ .lhs = array_ty_inst, - .rhs = @enumFromInt(Zir.Inst.Ref, i), + .rhs = @as(Zir.Inst.Ref, @enumFromInt(i)), } }, }); break :ri ResultInfo{ .rl = .{ .coerced_ty = ty_expr } }; @@ -1619,14 +1619,14 @@ fn arrayInitExprRlPtrInner( const astgen = gz.astgen; const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(u32, elements.len), + .body_len = @as(u32, @intCast(elements.len)), }); var extra_index = try reserveExtra(astgen, elements.len); for (elements, 0..) |elem_init, i| { const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{ .ptr = result_ptr, - .index = @intCast(u32, i), + .index = @as(u32, @intCast(i)), }); astgen.extra.items[extra_index] = refToIndex(elem_ptr).?; extra_index += 1; @@ -1776,7 +1776,7 @@ fn structInitExprRlNone( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ - .fields_len = @intCast(u32, struct_init.ast.fields.len), + .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len; var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); @@ -1834,7 +1834,7 @@ fn structInitExprRlPtrInner( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(u32, struct_init.ast.fields.len), + .body_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len); @@ -1866,7 +1866,7 @@ fn structInitExprRlTy( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ - .fields_len = @intCast(u32, 
struct_init.ast.fields.len), + .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)), }); const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len; var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); @@ -2105,7 +2105,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn } const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); - const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const search_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try genDefers(parent_gz, scope, parent_scope, .normal_only); @@ -2511,17 +2511,17 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .call, .field_call => { const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; const slot = &gz.astgen.extra.items[extra_index]; - var flags = @bitCast(Zir.Inst.Call.Flags, slot.*); + var flags = @as(Zir.Inst.Call.Flags, @bitCast(slot.*)); flags.ensure_result_used = true; - slot.* = @bitCast(u32, flags); + slot.* = @as(u32, @bitCast(flags)); break :b true; }, .builtin_call => { const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; const slot = &gz.astgen.extra.items[extra_index]; - var flags = @bitCast(Zir.Inst.BuiltinCall.Flags, slot.*); + var flags = @as(Zir.Inst.BuiltinCall.Flags, @bitCast(slot.*)); flags.ensure_result_used = true; - slot.* = @bitCast(u32, flags); + slot.* = @as(u32, @bitCast(flags)); break :b true; }, @@ -2897,7 +2897,7 @@ fn genDefers( .index = defer_scope.index, .len = defer_scope.len, }); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .defer_err_code, .data = .{ .defer_err_code = .{ @@ -2976,7 +2976,7 @@ fn deferStmt( const sub_scope = if 
(!have_err_code) &defer_gen.base else blk: { try gz.addDbgBlockBegin(); const ident_name = try gz.astgen.identAsString(payload_token); - remapped_err_code = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + remapped_err_code = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gz.astgen.gpa, .{ .tag = .extended, .data = .{ .extended = .{ @@ -3016,7 +3016,7 @@ fn deferStmt( break :blk gz.astgen.countBodyLenAfterFixups(body) + refs; }; - const index = @intCast(u32, gz.astgen.extra.items.len); + const index = @as(u32, @intCast(gz.astgen.extra.items.len)); try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len); if (have_err_code) { if (gz.astgen.ref_table.fetchRemove(remapped_err_code)) |kv| { @@ -3554,7 +3554,7 @@ fn ptrType( gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref)); } - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); const result = indexToRef(new_index); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ .ptr_type = .{ @@ -3645,7 +3645,7 @@ const WipMembers = struct { const max_decl_size = 11; fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self { - const payload_top = @intCast(u32, payload.items.len); + const payload_top = @as(u32, @intCast(payload.items.len)); const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32; const field_bits_start = decls_start + decl_count * max_decl_size; const fields_start = field_bits_start + if (bits_per_field > 0) blk: { @@ -3700,7 +3700,7 @@ const WipMembers = struct { fn appendToDeclSlice(self: *Self, data: []const u32) void { assert(self.decls_end + data.len <= self.field_bits_start); @memcpy(self.payload.items[self.decls_end..][0..data.len], data); - self.decls_end += 
@intCast(u32, data.len); + self.decls_end += @as(u32, @intCast(data.len)); } fn appendToField(self: *Self, data: u32) void { @@ -3713,14 +3713,14 @@ const WipMembers = struct { const empty_decl_slots = decls_per_u32 - (self.decl_index % decls_per_u32); if (self.decl_index > 0 and empty_decl_slots < decls_per_u32) { const index = self.payload_top + self.decl_index / decls_per_u32; - self.payload.items[index] >>= @intCast(u5, empty_decl_slots * bits_per_decl); + self.payload.items[index] >>= @as(u5, @intCast(empty_decl_slots * bits_per_decl)); } if (bits_per_field > 0) { const fields_per_u32 = 32 / bits_per_field; const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32); if (self.field_index > 0 and empty_field_slots < fields_per_u32) { const index = self.field_bits_start + self.field_index / fields_per_u32; - self.payload.items[index] >>= @intCast(u5, empty_field_slots * bits_per_field); + self.payload.items[index] >>= @as(u5, @intCast(empty_field_slots * bits_per_field)); } } } @@ -3882,7 +3882,7 @@ fn fnDecl( var param_gz = decl_gz.makeSubBlock(scope); defer param_gz.unstack(); const param_type = try expr(¶m_gz, params_scope, coerced_type_ri, param_type_node); - const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1)); _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); const main_tokens = tree.nodes.items(.main_token); @@ -4097,7 +4097,7 @@ fn fnDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4248,7 +4248,7 @@ fn globalVarDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); 
wip_members.appendToDeclSlice(&casted); } { @@ -4303,7 +4303,7 @@ fn comptimeDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4355,7 +4355,7 @@ fn usingnamespaceDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4542,7 +4542,7 @@ fn testDecl( { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); - const casted = @bitCast([4]u32, contents_hash); + const casted = @as([4]u32, @bitCast(contents_hash)); wip_members.appendToDeclSlice(&casted); } { @@ -4642,7 +4642,7 @@ fn structDeclInner( }; const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); - const field_count = @intCast(u32, container_decl.ast.members.len - decl_count); + const field_count = @as(u32, @intCast(container_decl.ast.members.len - decl_count)); const bits_per_field = 4; const max_field_size = 5; @@ -4750,7 +4750,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } else { wip_members.appendToField(@intFromEnum(field_type)); @@ -4768,7 +4768,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, 
astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } @@ -4783,7 +4783,7 @@ fn structDeclInner( const old_scratch_len = astgen.scratch.items.len; try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len))); block_scope.instructions.items.len = block_scope.instructions_top; } else if (member.comptime_token) |comptime_token| { return astgen.failTok(comptime_token, "comptime field without default initialization value", .{}); @@ -4796,7 +4796,7 @@ fn structDeclInner( .fields_len = field_count, .decls_len = decl_count, .backing_int_ref = backing_int_ref, - .backing_int_body_len = @intCast(u32, backing_int_body_len), + .backing_int_body_len = @as(u32, @intCast(backing_int_body_len)), .known_non_opv = known_non_opv, .known_comptime_only = known_comptime_only, .is_tuple = is_tuple, @@ -4856,7 +4856,7 @@ fn unionDeclInner( defer block_scope.unstack(); const decl_count = try astgen.scanDecls(&namespace, members); - const field_count = @intCast(u32, members.len - decl_count); + const field_count = @as(u32, @intCast(members.len - decl_count)); if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) { const layout_str = if (layout == .Extern) "extern" else "packed"; @@ -5151,7 +5151,7 @@ fn containerDecl( const bits_per_field = 1; const max_field_size = 3; - var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(u32, counts.decls), @intCast(u32, counts.total_fields), bits_per_field, max_field_size); + var wip_members = try WipMembers.init(gpa, &astgen.scratch, @as(u32, @intCast(counts.decls)), @as(u32, @intCast(counts.total_fields)), 
bits_per_field, max_field_size); defer wip_members.deinit(); for (container_decl.ast.members) |member_node| { @@ -5209,8 +5209,8 @@ fn containerDecl( .nonexhaustive = nonexhaustive, .tag_type = arg_inst, .body_len = body_len, - .fields_len = @intCast(u32, counts.total_fields), - .decls_len = @intCast(u32, counts.decls), + .fields_len = @as(u32, @intCast(counts.total_fields)), + .decls_len = @as(u32, @intCast(counts.decls)), }); wip_members.finishBits(bits_per_field); @@ -5400,7 +5400,7 @@ fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zi } setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{ - .fields_len = @intCast(u32, fields_len), + .fields_len = @as(u32, @intCast(fields_len)), }); const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); return rvalue(gz, ri, result, node); @@ -6463,7 +6463,7 @@ fn forExpr( { var capture_token = for_full.payload_token; for (for_full.ast.inputs, 0..) |input, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const capture_is_ref = token_tags[capture_token] == .asterisk; const ident_tok = capture_token + @intFromBool(capture_is_ref); const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_"); @@ -6521,7 +6521,7 @@ fn forExpr( // We use a dedicated ZIR instruction to assert the lengths to assist with // nicer error reporting as well as fewer ZIR bytes emitted. const len: Zir.Inst.Ref = len: { - const lens_len = @intCast(u32, lens.len); + const lens_len = @as(u32, @intCast(lens.len)); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len); const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{ .operands_len = lens_len, @@ -6591,7 +6591,7 @@ fn forExpr( var capture_token = for_full.payload_token; var capture_sub_scope: *Scope = &then_scope.base; for (for_full.ast.inputs, 0..) 
|input, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const capture_is_ref = token_tags[capture_token] == .asterisk; const ident_tok = capture_token + @intFromBool(capture_is_ref); const capture_name = tree.tokenSlice(ident_tok); @@ -6891,7 +6891,7 @@ fn switchExpr( // If any prong has an inline tag capture, allocate a shared dummy instruction for it const tag_inst = if (any_has_tag_capture) tag_inst: { - const inst = @intCast(Zir.Inst.Index, astgen.instructions.len); + const inst = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try astgen.instructions.append(astgen.gpa, .{ .tag = .extended, .data = .{ .extended = .{ @@ -6984,7 +6984,7 @@ fn switchExpr( break :blk &tag_scope.base; }; - const header_index = @intCast(u32, payloads.items.len); + const header_index = @as(u32, @intCast(payloads.items.len)); const body_len_index = if (is_multi_case) blk: { payloads.items[multi_case_table + multi_case_index] = header_index; multi_case_index += 1; @@ -7074,12 +7074,12 @@ fn switchExpr( }; const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); try payloads.ensureUnusedCapacity(gpa, body_len); - payloads.items[body_len_index] = @bitCast(u32, Zir.Inst.SwitchBlock.ProngInfo{ - .body_len = @intCast(u28, body_len), + payloads.items[body_len_index] = @as(u32, @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ + .body_len = @as(u28, @intCast(body_len)), .capture = capture, .is_inline = case.inline_token != null, .has_tag_capture = has_tag_capture, - }); + })); if (astgen.ref_table.fetchRemove(switch_block)) |kv| { appendPossiblyRefdBodyInst(astgen, payloads, kv.value); } @@ -7106,7 +7106,7 @@ fn switchExpr( .has_else = special_prong == .@"else", .has_under = special_prong == .under, .any_has_tag_capture = any_has_tag_capture, - .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len), + .scalar_cases_len = @as(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, @intCast(scalar_cases_len)), }, }); @@ 
-7140,7 +7140,7 @@ fn switchExpr( end_index += 3 + items_len + 2 * ranges_len; } - const body_len = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, payloads.items[body_len_index]).body_len; + const body_len = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(payloads.items[body_len_index])).body_len; end_index += body_len; switch (strat.tag) { @@ -7579,7 +7579,7 @@ fn tunnelThroughClosure( .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token), } }, }); - gop.value_ptr.* = @intCast(Zir.Inst.Index, gz.astgen.instructions.len - 1); + gop.value_ptr.* = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len - 1)); } // Add an instruction to get the value from the closure into @@ -7680,7 +7680,7 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: }; // If the value fits into a f64 without losing any precision, store it that way. @setFloatMode(.Strict); - const smaller_float = @floatCast(f64, float_number); + const smaller_float = @as(f64, @floatCast(float_number)); const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { const result = try gz.addFloat(smaller_float); @@ -7688,12 +7688,12 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: } // We need to use 128 bits. Break the float into 4 u32 values so we can // put it into the `extra` array. 
- const int_bits = @bitCast(u128, float_number); + const int_bits = @as(u128, @bitCast(float_number)); const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{ - .piece0 = @truncate(u32, int_bits), - .piece1 = @truncate(u32, int_bits >> 32), - .piece2 = @truncate(u32, int_bits >> 64), - .piece3 = @truncate(u32, int_bits >> 96), + .piece0 = @as(u32, @truncate(int_bits)), + .piece1 = @as(u32, @truncate(int_bits >> 32)), + .piece2 = @as(u32, @truncate(int_bits >> 64)), + .piece3 = @as(u32, @truncate(int_bits >> 96)), }); return rvalue(gz, ri, result, source_node); }, @@ -7719,22 +7719,22 @@ fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token }); }, .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}), - .upper_case_base => |i| return astgen.failOff(token, @intCast(u32, i), "base prefix must be lowercase", .{}), - .invalid_float_base => |i| return astgen.failOff(token, @intCast(u32, i), "invalid base for float literal", .{}), - .repeated_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "repeated digit separator", .{}), - .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before digit separator", .{}), - .invalid_digit => |info| return astgen.failOff(token, @intCast(u32, info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), - .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "invalid digit '{c}' in exponent", .{bytes[i]}), - .duplicate_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "duplicate exponent", .{}), - .exponent_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before exponent", .{}), - .special_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before '{c}'", .{bytes[i]}), - .trailing_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit after 
'{c}'", .{bytes[i - 1]}), - .trailing_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "trailing digit separator", .{}), + .upper_case_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "base prefix must be lowercase", .{}), + .invalid_float_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid base for float literal", .{}), + .repeated_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "repeated digit separator", .{}), + .invalid_underscore_after_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before digit separator", .{}), + .invalid_digit => |info| return astgen.failOff(token, @as(u32, @intCast(info.i)), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), + .invalid_digit_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid digit '{c}' in exponent", .{bytes[i]}), + .duplicate_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "duplicate exponent", .{}), + .exponent_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before exponent", .{}), + .special_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before '{c}'", .{bytes[i]}), + .trailing_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit after '{c}'", .{bytes[i - 1]}), + .trailing_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "trailing digit separator", .{}), .duplicate_period => unreachable, // Validated by tokenizer .invalid_character => unreachable, // Validated by tokenizer .invalid_exponent_sign => |i| { assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer - return astgen.failOff(token, @intCast(u32, i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] }); + return astgen.failOff(token, @as(u32, @intCast(i)), "sign '{c}' cannot follow 
digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] }); }, } } @@ -7801,7 +7801,7 @@ fn asmExpr( if (output_type_bits != 0) { return astgen.failNode(output_node, "inline assembly allows up to one output value", .{}); } - output_type_bits |= @as(u32, 1) << @intCast(u5, i); + output_type_bits |= @as(u32, 1) << @as(u5, @intCast(i)); const out_type_node = node_datas[output_node].lhs; const out_type_inst = try typeExpr(gz, scope, out_type_node); outputs[i] = .{ @@ -8024,11 +8024,11 @@ fn ptrCast( node = node_datas[node].lhs; } - const flags_i = @bitCast(u5, flags); + const flags_i = @as(u5, @bitCast(flags)); assert(flags_i != 0); const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true }; - if (flags_i == @bitCast(u5, ptr_only)) { + if (flags_i == @as(u5, @bitCast(ptr_only))) { // Special case: simpler representation return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast"); } @@ -8037,7 +8037,7 @@ fn ptrCast( .const_cast = true, .volatile_cast = true, }; - if ((flags_i & ~@bitCast(u5, no_result_ty_flags)) == 0) { + if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) { // Result type not needed const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node); const operand = try expr(gz, scope, .{ .rl = .none }, node); @@ -8119,8 +8119,8 @@ fn typeOf( const body = typeof_scope.instructionsSlice(); const body_len = astgen.countBodyLenAfterFixups(body); astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{ - .body_len = @intCast(u32, body_len), - .body_index = @intCast(u32, astgen.extra.items.len), + .body_len = @as(u32, @intCast(body_len)), + .body_index = @as(u32, @intCast(astgen.extra.items.len)), .src_node = gz.nodeIndexToRelative(node), }); try astgen.extra.ensureUnusedCapacity(gpa, body_len); @@ -8464,7 +8464,7 @@ fn builtinCall( .node = gz.nodeIndexToRelative(node), .operand = operand, }); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, 
@intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -9115,7 +9115,7 @@ fn callExpr( } assert(node != 0); - const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const call_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); const call_inst = Zir.indexToRef(call_index); try gz.astgen.instructions.append(astgen.gpa, undefined); try gz.instructions.append(astgen.gpa, call_index); @@ -9139,7 +9139,7 @@ fn callExpr( try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body)); appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top); + astgen.scratch.items[scratch_index] = @as(u32, @intCast(astgen.scratch.items.len - scratch_top)); scratch_index += 1; } @@ -9157,8 +9157,8 @@ fn callExpr( .callee = callee_obj, .flags = .{ .pop_error_return_trace = !propagate_error_trace, - .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)), - .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len), + .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))), + .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)), }, }); if (call.ast.params.len != 0) { @@ -9178,8 +9178,8 @@ fn callExpr( .field_name_start = callee_field.field_name_start, .flags = .{ .pop_error_return_trace = !propagate_error_trace, - .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)), - .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len), + .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))), + .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)), }, }); if (call.ast.params.len != 0) { @@ -10552,7 +10552,7 @@ fn 
failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .invalid_escape_character => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid escape character: '{c}'", .{raw_string[bad_index]}, ); @@ -10560,7 +10560,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .expected_hex_digit => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit, found '{c}'", .{raw_string[bad_index]}, ); @@ -10568,7 +10568,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .empty_unicode_escape_sequence => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "empty unicode escape sequence", .{}, ); @@ -10576,7 +10576,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .expected_hex_digit_or_rbrace => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit or '}}', found '{c}'", .{raw_string[bad_index]}, ); @@ -10584,7 +10584,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .invalid_unicode_codepoint => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "unicode escape does not correspond to a valid codepoint", .{}, ); @@ -10592,7 +10592,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .expected_lbrace => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '{{', found '{c}", .{raw_string[bad_index]}, ); @@ -10600,7 +10600,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .expected_rbrace 
=> |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '}}', found '{c}", .{raw_string[bad_index]}, ); @@ -10608,7 +10608,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .expected_single_quote => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected single quote ('), found '{c}", .{raw_string[bad_index]}, ); @@ -10616,7 +10616,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token .invalid_character => |bad_index| { return astgen.failOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid byte in string or character literal: '{c}'", .{raw_string[bad_index]}, ); @@ -10651,14 +10651,14 @@ fn appendErrorNodeNotes( ) Allocator.Error!void { @setCold(true); const string_bytes = &astgen.string_bytes; - const msg = @intCast(u32, string_bytes.items.len); + const msg = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); const notes_index: u32 = if (notes.len != 0) blk: { const notes_start = astgen.extra.items.len; try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len); - astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len)); + astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len))); astgen.extra.appendSliceAssumeCapacity(notes); - break :blk @intCast(u32, notes_start); + break :blk @as(u32, @intCast(notes_start)); } else 0; try astgen.compile_errors.append(astgen.gpa, .{ .msg = msg, @@ -10743,14 +10743,14 @@ fn appendErrorTokNotesOff( @setCold(true); const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const msg = @intCast(u32, string_bytes.items.len); + const msg = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.writer(gpa).print(format ++ "\x00", args); const notes_index: u32 = if 
(notes.len != 0) blk: { const notes_start = astgen.extra.items.len; try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len); - astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len)); + astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len))); astgen.extra.appendSliceAssumeCapacity(notes); - break :blk @intCast(u32, notes_start); + break :blk @as(u32, @intCast(notes_start)); } else 0; try astgen.compile_errors.append(gpa, .{ .msg = msg, @@ -10779,7 +10779,7 @@ fn errNoteTokOff( ) Allocator.Error!u32 { @setCold(true); const string_bytes = &astgen.string_bytes; - const msg = @intCast(u32, string_bytes.items.len); + const msg = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); return astgen.addExtra(Zir.Inst.CompileErrors.Item{ .msg = msg, @@ -10798,7 +10798,7 @@ fn errNoteNode( ) Allocator.Error!u32 { @setCold(true); const string_bytes = &astgen.string_bytes; - const msg = @intCast(u32, string_bytes.items.len); + const msg = @as(u32, @intCast(string_bytes.items.len)); try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); return astgen.addExtra(Zir.Inst.CompileErrors.Item{ .msg = msg, @@ -10812,7 +10812,7 @@ fn errNoteNode( fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); try astgen.appendIdentStr(ident_token, string_bytes); const key: []const u8 = string_bytes.items[str_index..]; const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ @@ -10858,7 +10858,7 @@ fn docCommentAsStringFromFirst( const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); const token_starts = 
astgen.tree.tokens.items(.start); const token_tags = astgen.tree.tokens.items(.tag); @@ -10901,7 +10901,7 @@ const IndexSlice = struct { index: u32, len: u32 }; fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); const token_bytes = astgen.tree.tokenSlice(str_lit_token); try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); const key = string_bytes.items[str_index..]; @@ -10914,7 +10914,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { string_bytes.shrinkRetainingCapacity(str_index); return IndexSlice{ .index = gop.key_ptr.*, - .len = @intCast(u32, key.len), + .len = @as(u32, @intCast(key.len)), }; } else { gop.key_ptr.* = str_index; @@ -10924,7 +10924,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { try string_bytes.append(gpa, 0); return IndexSlice{ .index = str_index, - .len = @intCast(u32, key.len), + .len = @as(u32, @intCast(key.len)), }; } } @@ -10961,15 +10961,15 @@ fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { const len = string_bytes.items.len - str_index; try string_bytes.append(gpa, 0); return IndexSlice{ - .index = @intCast(u32, str_index), - .len = @intCast(u32, len), + .index = @as(u32, @intCast(str_index)), + .len = @as(u32, @intCast(len)), }; } fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); + const str_index = @as(u32, @intCast(string_bytes.items.len)); const token_bytes = astgen.tree.tokenSlice(str_lit_token); try string_bytes.append(gpa, 0); // Indicates this is a test. 
try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); @@ -11321,7 +11321,7 @@ const GenZir = struct { } fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 { - return @bitCast(i32, node_index) - @bitCast(i32, gz.decl_node_index); + return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index)); } fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 { @@ -11478,7 +11478,7 @@ const GenZir = struct { const astgen = gz.astgen; const gpa = astgen.gpa; const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref; - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); try astgen.instructions.ensureUnusedCapacity(gpa, 1); @@ -11496,8 +11496,8 @@ const GenZir = struct { const block = node_datas[fn_decl].rhs; const rbrace_start = token_starts[tree.lastToken(block)]; astgen.advanceSourceCursor(rbrace_start); - const rbrace_line = @intCast(u32, astgen.source_line - gz.decl_line); - const rbrace_column = @intCast(u32, astgen.source_column); + const rbrace_line = @as(u32, @intCast(astgen.source_line - gz.decl_line)); + const rbrace_column = @as(u32, @intCast(astgen.source_column)); const columns = args.lbrace_column | (rbrace_column << 16); src_locs_buffer[0] = args.lbrace_line; @@ -11733,18 +11733,18 @@ const GenZir = struct { astgen.extra.appendAssumeCapacity(@intFromEnum(args.init)); } - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = .variable, - .small = @bitCast(u16, Zir.Inst.ExtendedVar.Small{ + .small = @as(u16, @bitCast(Zir.Inst.ExtendedVar.Small{ .has_lib_name = args.lib_name != 0, .has_align = args.align_inst != .none, .has_init = args.init != .none, .is_extern = args.is_extern, .is_threadlocal = args.is_threadlocal, - 
}), + })), .operand = payload_index, } }, }); @@ -11764,7 +11764,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .bool_br = .{ @@ -11790,12 +11790,12 @@ const GenZir = struct { try astgen.instructions.ensureUnusedCapacity(gpa, 1); try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .int_big, .data = .{ .str = .{ - .start = @intCast(u32, astgen.string_bytes.items.len), - .len = @intCast(u32, limbs.len), + .start = @as(u32, @intCast(astgen.string_bytes.items.len)), + .len = @as(u32, @intCast(limbs.len)), } }, }); gz.instructions.appendAssumeCapacity(new_index); @@ -11835,7 +11835,7 @@ const GenZir = struct { src_node: Ast.Node.Index, ) !Zir.Inst.Index { assert(operand != .none); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gz.astgen.gpa, .{ .tag = tag, .data = .{ .un_node = .{ @@ -11858,7 +11858,7 @@ const GenZir = struct { try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .pl_node = .{ @@ -11910,12 +11910,12 @@ const GenZir = struct { const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{ .name = 
name, .doc_comment = doc_comment_index, - .body_len = @intCast(u32, body_len), + .body_len = @as(u32, @intCast(body_len)), }); gz.astgen.appendBodyWithFixups(param_body); param_gz.unstack(); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .pl_tok = .{ @@ -11943,7 +11943,7 @@ const GenZir = struct { try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -11975,12 +11975,12 @@ const GenZir = struct { const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{ .src_node = gz.nodeIndexToRelative(node), }); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = opcode, - .small = @intCast(u16, operands.len), + .small = @as(u16, @intCast(operands.len)), .operand = payload_index, } }, }); @@ -12000,12 +12000,12 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = opcode, - .small = @intCast(u16, trailing_len), + .small = @as(u16, @intCast(trailing_len)), .operand = payload_index, } }, }); @@ -12038,7 +12038,7 @@ const GenZir = struct { abs_tok_index: Ast.TokenIndex, ) !Zir.Inst.Index { 
const astgen = gz.astgen; - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); assert(operand != .none); try astgen.instructions.append(astgen.gpa, .{ .tag = tag, @@ -12121,7 +12121,7 @@ const GenZir = struct { .operand_src_node = Zir.Inst.Break.no_src_node, }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12147,7 +12147,7 @@ const GenZir = struct { .operand_src_node = Zir.Inst.Break.no_src_node, }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12174,7 +12174,7 @@ const GenZir = struct { .operand_src_node = gz.nodeIndexToRelative(operand_src_node), }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12201,7 +12201,7 @@ const GenZir = struct { .operand_src_node = gz.nodeIndexToRelative(operand_src_node), }; const payload_index = try gz.astgen.addExtra(extra); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = tag, .data = .{ .@"break" = .{ @@ -12293,7 +12293,7 @@ const GenZir = struct { .data = .{ .extended = .{ .opcode = opcode, .small = undefined, - .operand = @bitCast(u32, 
gz.nodeIndexToRelative(src_node)), + .operand = @as(u32, @bitCast(gz.nodeIndexToRelative(src_node))), } }, }); } @@ -12336,7 +12336,7 @@ const GenZir = struct { const is_comptime: u4 = @intFromBool(args.is_comptime); const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -12390,12 +12390,12 @@ const GenZir = struct { // * 0b000000XX_XXX00000 - `inputs_len`. // * 0b0XXXXX00_00000000 - `clobbers_len`. // * 0bX0000000_00000000 - is volatile - const small: u16 = @intCast(u16, args.outputs.len) | - @intCast(u16, args.inputs.len << 5) | - @intCast(u16, args.clobbers.len << 10) | + const small: u16 = @as(u16, @intCast(args.outputs.len)) | + @as(u16, @intCast(args.inputs.len << 5)) | + @as(u16, @intCast(args.clobbers.len << 10)) | (@as(u16, @intFromBool(args.is_volatile)) << 15); - const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ @@ -12412,7 +12412,7 @@ const GenZir = struct { /// Does *not* append the block instruction to the scope. /// Leaves the `payload_index` field undefined. 
fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); const gpa = gz.astgen.gpa; try gz.astgen.instructions.append(gpa, .{ .tag = tag, @@ -12429,7 +12429,7 @@ const GenZir = struct { fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { const gpa = gz.astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gpa, .{ .tag = tag, .data = .{ .pl_node = .{ @@ -12456,11 +12456,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 6); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); @@ -12478,7 +12478,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .struct_decl, - .small = @bitCast(u16, Zir.Inst.StructDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.StructDecl.Small{ .has_src_node = args.src_node != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, @@ -12488,7 +12488,7 @@ const GenZir = struct { .is_tuple = args.is_tuple, .name_strategy = gz.anon_name_strategy, .layout = args.layout, - }), + })), .operand = payload_index, } }, }); @@ -12507,11 +12507,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index = 
@intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); @@ -12529,7 +12529,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .union_decl, - .small = @bitCast(u16, Zir.Inst.UnionDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.UnionDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, @@ -12538,7 +12538,7 @@ const GenZir = struct { .name_strategy = gz.anon_name_strategy, .layout = args.layout, .auto_enum_tag = args.auto_enum_tag, - }), + })), .operand = payload_index, } }, }); @@ -12556,11 +12556,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); @@ -12578,7 +12578,7 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .enum_decl, - .small = @bitCast(u16, Zir.Inst.EnumDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.EnumDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, @@ -12586,7 +12586,7 @@ const GenZir = struct { .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, .nonexhaustive = 
args.nonexhaustive, - }), + })), .operand = payload_index, } }, }); @@ -12600,11 +12600,11 @@ const GenZir = struct { const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 2); - const payload_index = @intCast(u32, astgen.extra.items.len); + const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); - astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); @@ -12613,11 +12613,11 @@ const GenZir = struct { .tag = .extended, .data = .{ .extended = .{ .opcode = .opaque_decl, - .small = @bitCast(u16, Zir.Inst.OpaqueDecl.Small{ + .small = @as(u16, @bitCast(Zir.Inst.OpaqueDecl.Small{ .has_src_node = args.src_node != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, - }), + })), .operand = payload_index, } }, }); @@ -12632,7 +12632,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(inst); gz.instructions.appendAssumeCapacity(new_index); return new_index; @@ -12643,7 +12643,7 @@ const GenZir = struct { try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.len += 1; gz.instructions.appendAssumeCapacity(new_index); return new_index; @@ -12695,7 +12695,7 @@ const GenZir = struct { return; } - const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const new_index = 
@as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gpa, .{ .tag = .dbg_block_end, .data = undefined }); try gz.instructions.append(gpa, new_index); } @@ -12704,7 +12704,7 @@ const GenZir = struct { /// This can only be for short-lived references; the memory becomes invalidated /// when another string is added. fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 { - return @ptrCast([*:0]const u8, astgen.string_bytes.items.ptr) + index; + return @as([*:0]const u8, @ptrCast(astgen.string_bytes.items.ptr)) + index; } /// Local variables shadowing detection, including function parameters. @@ -12983,7 +12983,7 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { .extended => { const zir_data = astgen.instructions.items(.data); if (zir_data[inst].extended.opcode != .alloc) return false; - const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(zir_data[inst].extended.small)); return !small.has_type; }, @@ -13027,7 +13027,7 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { check_inst = ref_inst; } } - return @intCast(u32, count); + return @as(u32, @intCast(count)); } fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void { @@ -13059,7 +13059,7 @@ fn lowerAstErrors(astgen: *AstGen) !void { if (token_tags[parse_err.token + @intFromBool(parse_err.token_is_prev)] == .invalid) { const tok = parse_err.token + @intFromBool(parse_err.token_is_prev); - const bad_off = @intCast(u32, tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len); + const bad_off = @as(u32, @intCast(tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len)); const byte_abs = token_starts[parse_err.token + @intFromBool(parse_err.token_is_prev)] + bad_off; try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{ std.zig.fmtEscapes(tree.source[byte_abs..][0..1]), 
diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 33c57b119792..1b9988c0c3cd 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -110,7 +110,7 @@ pub fn generateZirData(self: *Autodoc) !void { comptime std.debug.assert(@intFromEnum(InternPool.Index.first_type) == 0); var i: u32 = 0; while (i <= @intFromEnum(InternPool.Index.last_type)) : (i += 1) { - const ip_index = @enumFromInt(InternPool.Index, i); + const ip_index = @as(InternPool.Index, @enumFromInt(i)); var tmpbuf = std.ArrayList(u8).init(self.arena); if (ip_index == .generic_poison_type) { // Not a real type, doesn't have a normal name @@ -1669,7 +1669,7 @@ fn walkInstruction( // present in json var sentinel: ?DocData.Expr = null; if (ptr.flags.has_sentinel) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); sentinel = ref_result.expr; extra_index += 1; @@ -1677,21 +1677,21 @@ fn walkInstruction( var @"align": ?DocData.Expr = null; if (ptr.flags.has_align) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); @"align" = ref_result.expr; extra_index += 1; } var address_space: ?DocData.Expr = null; if (ptr.flags.has_addrspace) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); address_space = ref_result.expr; extra_index += 1; } var bit_start: ?DocData.Expr = null; if (ptr.flags.has_bit_range) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, 
parent_scope, parent_src, ref, false); address_space = ref_result.expr; extra_index += 1; @@ -1699,7 +1699,7 @@ fn walkInstruction( var host_size: ?DocData.Expr = null; if (ptr.flags.has_bit_range) { - const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false); host_size = ref_result.expr; } @@ -2549,11 +2549,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2606,7 +2606,7 @@ fn walkInstruction( .variable => { const extra = file.zir.extraData(Zir.Inst.ExtendedVar, extended.operand); - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; if (small.has_lib_name) extra_index += 1; if (small.has_align) extra_index += 1; @@ -2619,7 +2619,7 @@ fn walkInstruction( }; if (small.has_init) { - const var_init_ref = @enumFromInt(Ref, file.zir.extra[extra_index]); + const var_init_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type); value.expr = var_init.expr; value.typeRef = var_init.typeRef; @@ -2636,11 +2636,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const 
src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2655,7 +2655,7 @@ fn walkInstruction( const tag_type_ref: ?Ref = if (small.has_tag_type) blk: { const tag_type = file.zir.extra[extra_index]; extra_index += 1; - const tag_ref = @enumFromInt(Ref, tag_type); + const tag_ref = @as(Ref, @enumFromInt(tag_type)); break :blk tag_ref; } else null; @@ -2763,11 +2763,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2780,7 +2780,7 @@ fn walkInstruction( const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: { const tag_type = file.zir.extra[extra_index]; extra_index += 1; - const tag_ref = @enumFromInt(Ref, tag_type); + const tag_ref = @as(Ref, @enumFromInt(tag_type)); const wr = try self.walkRef(file, parent_scope, parent_src, tag_ref, false); break :blk wr.expr; } else null; @@ -2826,7 +2826,7 @@ fn walkInstruction( bit_bag_idx += 1; } - const has_value = @truncate(u1, cur_bit_bag) != 0; + const has_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_index = file.zir.extra[extra_index]; @@ -2838,7 +2838,7 @@ fn walkInstruction( const value_expr: ?DocData.Expr = if (has_value) blk: { const value_ref = file.zir.extra[extra_index]; extra_index += 1; - const value = try self.walkRef(file, &scope, src_info, @enumFromInt(Ref, value_ref), false); + const value = try self.walkRef(file, &scope, src_info, @as(Ref, 
@enumFromInt(value_ref)), false); break :blk value.expr; } else null; try field_values.append(self.arena, value_expr); @@ -2899,11 +2899,11 @@ fn walkInstruction( .enclosing_type = type_slot_index, }; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, file.zir.extra[extra_index]); + const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -2927,7 +2927,7 @@ fn walkInstruction( const backing_int_body_len = file.zir.extra[extra_index]; extra_index += 1; // backing_int_body_len if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Ref, file.zir.extra[extra_index]); + const backing_int_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); const backing_int_res = try self.walkRef(file, &scope, src_info, backing_int_ref, true); backing_int = backing_int_res.expr; extra_index += 1; // backing_int_ref @@ -3154,7 +3154,7 @@ fn analyzeAllDecls( priv_decl_indexes: *std.ArrayListUnmanaged(usize), ) AutodocErrors!usize { const first_decl_indexes_slot = decl_indexes.items.len; - const original_it = file.zir.declIterator(@intCast(u32, parent_inst_index)); + const original_it = file.zir.declIterator(@as(u32, @intCast(parent_inst_index))); // First loop to discover decl names { @@ -3180,7 +3180,7 @@ fn analyzeAllDecls( const decl_name_index = file.zir.extra[d.sub_index + 5]; switch (decl_name_index) { 0 => { - const is_exported = @truncate(u1, d.flags >> 1); + const is_exported = @as(u1, @truncate(d.flags >> 1)); switch (is_exported) { 0 => continue, // comptime decl 1 => { @@ -3255,10 +3255,10 @@ fn analyzeDecl( d: Zir.DeclIterator.Item, ) AutodocErrors!void { const data = file.zir.instructions.items(.data); - const is_pub = @truncate(u1, d.flags >> 0) != 0; + const is_pub = 
@as(u1, @truncate(d.flags >> 0)) != 0; // const is_exported = @truncate(u1, d.flags >> 1) != 0; - const has_align = @truncate(u1, d.flags >> 2) != 0; - const has_section_or_addrspace = @truncate(u1, d.flags >> 3) != 0; + const has_align = @as(u1, @truncate(d.flags >> 2)) != 0; + const has_section_or_addrspace = @as(u1, @truncate(d.flags >> 3)) != 0; var extra_index = d.sub_index; // const hash_u32s = file.zir.extra[extra_index..][0..4]; @@ -3277,21 +3277,21 @@ fn analyzeDecl( extra_index += 1; const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; _ = align_inst; const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; _ = section_inst; const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); extra_index += 1; break :inst inst; }; @@ -3381,7 +3381,7 @@ fn analyzeUsingnamespaceDecl( ) AutodocErrors!void { const data = file.zir.instructions.items(.data); - const is_pub = @truncate(u1, d.flags) != 0; + const is_pub = @as(u1, @truncate(d.flags)) != 0; const value_index = file.zir.extra[d.sub_index + 6]; const doc_comment_index = file.zir.extra[d.sub_index + 7]; @@ -4028,7 +4028,7 @@ fn analyzeFancyFunction( ) AutodocErrors!DocData.WalkResult { const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); - const fn_info = file.zir.getFnInfo(@intCast(u32, inst_index)); + const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index))); try 
self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( @@ -4108,7 +4108,7 @@ fn analyzeFancyFunction( var align_index: ?usize = null; if (extra.data.bits.has_align_ref) { - const align_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); align_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, align_ref, false); extra_index += 1; @@ -4125,7 +4125,7 @@ fn analyzeFancyFunction( var addrspace_index: ?usize = null; if (extra.data.bits.has_addrspace_ref) { - const addrspace_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); addrspace_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, addrspace_ref, false); extra_index += 1; @@ -4142,7 +4142,7 @@ fn analyzeFancyFunction( var section_index: ?usize = null; if (extra.data.bits.has_section_ref) { - const section_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const section_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); section_index = self.exprs.items.len; _ = try self.walkRef(file, scope, parent_src, section_ref, false); extra_index += 1; @@ -4159,7 +4159,7 @@ fn analyzeFancyFunction( var cc_index: ?usize = null; if (extra.data.bits.has_cc_ref and !extra.data.bits.has_cc_body) { - const cc_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); const cc_expr = try self.walkRef(file, scope, parent_src, cc_ref, false); cc_index = self.exprs.items.len; @@ -4262,7 +4262,7 @@ fn analyzeFunction( ) AutodocErrors!DocData.WalkResult { const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); - const fn_info = file.zir.getFnInfo(@intCast(u32, 
inst_index)); + const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index))); try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( @@ -4449,13 +4449,13 @@ fn collectUnionFieldInfo( cur_bit_bag = file.zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_tag = @truncate(u1, cur_bit_bag) != 0; + const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -4464,7 +4464,7 @@ fn collectUnionFieldInfo( const doc_comment_index = file.zir.extra[extra_index]; extra_index += 1; const field_type = if (has_type) - @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]) + @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])) else .void_type; if (has_type) extra_index += 1; @@ -4532,13 +4532,13 @@ fn collectStructFieldInfo( cur_bit_bag = file.zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_default = @truncate(u1, cur_bit_bag) != 0; + const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; // const is_comptime = @truncate(u1, cur_bit_bag) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name: ?u32 = if (!is_tuple) blk: { @@ -4558,7 +4558,7 @@ fn collectStructFieldInfo( if (has_type_body) { fields[field_i].type_body_len = file.zir.extra[extra_index]; } else { - 
fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]); + fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index])); } extra_index += 1; @@ -4855,9 +4855,9 @@ fn srcLocInfo( src_node: i32, parent_src: SrcLocInfo, ) !SrcLocInfo { - const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node); + const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); const tree = try file.getTree(self.comp_module.gpa); - const node_idx = @bitCast(Ast.Node.Index, sn); + const node_idx = @as(Ast.Node.Index, @bitCast(sn)); const tokens = tree.nodes.items(.main_token); const tok_idx = tokens[node_idx]; @@ -4876,9 +4876,9 @@ fn declIsVar( src_node: i32, parent_src: SrcLocInfo, ) !bool { - const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node); + const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); const tree = try file.getTree(self.comp_module.gpa); - const node_idx = @bitCast(Ast.Node.Index, sn); + const node_idx = @as(Ast.Node.Index, @bitCast(sn)); const tokens = tree.nodes.items(.main_token); const tags = tree.tokens.items(.tag); diff --git a/src/Compilation.zig b/src/Compilation.zig index 55b3ab95f7ef..d9273dcdd869 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1046,7 +1046,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: { var buf = std.ArrayList(u8).init(arena); for (options.target.cpu.arch.allFeaturesList(), 0..) 
|feature, index_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = options.target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { @@ -2562,7 +2562,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } } - return @intCast(u32, total); + return @as(u32, @intCast(total)); } /// This function is temporally single-threaded. @@ -2596,7 +2596,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { } for (self.lld_errors.items) |lld_error| { - const notes_len = @intCast(u32, lld_error.context_lines.len); + const notes_len = @as(u32, @intCast(lld_error.context_lines.len)); try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(lld_error.msg), @@ -2753,7 +2753,7 @@ pub const ErrorNoteHashContext = struct { std.hash.autoHash(&hasher, src.span_main); } - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } pub fn eql( @@ -2830,8 +2830,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = 0, }), }); @@ -2842,13 +2842,13 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = err_span.start, .span_main = err_span.main, .span_end = err_span.end, - .line = @intCast(u32, err_loc.line), - .column = @intCast(u32, err_loc.column), + .line = @as(u32, @intCast(err_loc.line)), + .column = @as(u32, @intCast(err_loc.column)), .source_line = if (module_err_msg.src_loc.lazy == .entire_file) 0 else try eb.addString(err_loc.source_line), - .reference_trace_len = @intCast(u32, ref_traces.items.len), + .reference_trace_len = @as(u32, @intCast(ref_traces.items.len)), }); for (ref_traces.items) 
|rt| { @@ -2874,8 +2874,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = if (err_loc.eql(loc)) 0 else try eb.addString(loc.source_line), }), }, .{ .eb = eb }); @@ -2884,7 +2884,7 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod } } - const notes_len = @intCast(u32, notes.entries.len); + const notes_len = @as(u32, @intCast(notes.entries.len)); try eb.addRootErrorMessage(.{ .msg = try eb.addString(module_err_msg.msg), @@ -2919,7 +2919,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { } const token_starts = file.tree.tokens.items(.start); const start = token_starts[item.data.token] + item.data.byte_offset; - const end = start + @intCast(u32, file.tree.tokenSlice(item.data.token).len) - item.data.byte_offset; + const end = start + @as(u32, @intCast(file.tree.tokenSlice(item.data.token).len)) - item.data.byte_offset; break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; }; const err_loc = std.zig.findLineColumn(file.source, err_span.main); @@ -2935,8 +2935,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { .span_start = err_span.start, .span_main = err_span.main, .span_end = err_span.end, - .line = @intCast(u32, err_loc.line), - .column = @intCast(u32, err_loc.column), + .line = @as(u32, @intCast(err_loc.line)), + .column = @as(u32, @intCast(err_loc.column)), .source_line = try eb.addString(err_loc.source_line), }), .notes_len = item.data.notesLen(file.zir), @@ -2956,7 +2956,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { } const token_starts = file.tree.tokens.items(.start); const start = token_starts[note_item.data.token] + 
note_item.data.byte_offset; - const end = start + @intCast(u32, file.tree.tokenSlice(note_item.data.token).len) - item.data.byte_offset; + const end = start + @as(u32, @intCast(file.tree.tokenSlice(note_item.data.token).len)) - item.data.byte_offset; break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; }; const loc = std.zig.findLineColumn(file.source, span.main); @@ -2970,8 +2970,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { .span_start = span.start, .span_main = span.main, .span_end = span.end, - .line = @intCast(u32, loc.line), - .column = @intCast(u32, loc.column), + .line = @as(u32, @intCast(loc.line)), + .column = @as(u32, @intCast(loc.column)), .source_line = if (loc.eql(err_loc)) 0 else @@ -4302,7 +4302,7 @@ pub fn addCCArgs( const all_features_list = target.cpu.arch.allFeaturesList(); try argv.ensureUnusedCapacity(all_features_list.len * 4); for (all_features_list, 0..) |feature, index_usize| { - const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { @@ -5172,7 +5172,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca }); for (target.cpu.arch.allFeaturesList(), 0..) 
|feature, index_usize| { - const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); + const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); const is_enabled = target.cpu.features.isEnabled(index); if (is_enabled) { try buffer.writer().print(" .{},\n", .{std.zig.fmtId(feature.name)}); diff --git a/src/InternPool.zig b/src/InternPool.zig index 33d4108e6ddb..1a89c239ef09 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -80,7 +80,7 @@ const KeyAdapter = struct { pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@enumFromInt(Index, b_map_index)).eql(a, ctx.intern_pool); + return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { @@ -95,7 +95,7 @@ pub const OptionalMapIndex = enum(u32) { pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { if (oi == .none) return null; - return @enumFromInt(MapIndex, @intFromEnum(oi)); + return @as(MapIndex, @enumFromInt(@intFromEnum(oi))); } }; @@ -104,7 +104,7 @@ pub const MapIndex = enum(u32) { _, pub fn toOptional(i: MapIndex) OptionalMapIndex { - return @enumFromInt(OptionalMapIndex, @intFromEnum(i)); + return @as(OptionalMapIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -114,7 +114,7 @@ pub const RuntimeIndex = enum(u32) { _, pub fn increment(ri: *RuntimeIndex) void { - ri.* = @enumFromInt(RuntimeIndex, @intFromEnum(ri.*) + 1); + ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1)); } }; @@ -130,11 +130,11 @@ pub const NullTerminatedString = enum(u32) { _, pub fn toString(self: NullTerminatedString) String { - return @enumFromInt(String, @intFromEnum(self)); + return @as(String, @enumFromInt(@intFromEnum(self))); } pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { - return @enumFromInt(OptionalNullTerminatedString, @intFromEnum(self)); + return @as(OptionalNullTerminatedString, 
@enumFromInt(@intFromEnum(self))); } const Adapter = struct { @@ -196,7 +196,7 @@ pub const OptionalNullTerminatedString = enum(u32) { pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString { if (oi == .none) return null; - return @enumFromInt(NullTerminatedString, @intFromEnum(oi)); + return @as(NullTerminatedString, @enumFromInt(@intFromEnum(oi))); } }; @@ -282,7 +282,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } }; @@ -420,7 +420,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } /// Look up field index based on tag value. @@ -440,7 +440,7 @@ pub const Key = union(enum) { const map = &ip.maps.items[@intFromEnum(values_map)]; const adapter: Index.Adapter = .{ .indexes = self.values }; const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; - return @intCast(u32, field_index); + return @as(u32, @intCast(field_index)); } // Auto-numbered enum. Convert `int_tag_val` to field index. 
const field_index = switch (ip.indexToKey(int_tag_val).int.storage) { @@ -511,12 +511,12 @@ pub const Key = union(enum) { pub fn paramIsComptime(self: @This(), i: u5) bool { assert(i < self.param_types.len); - return @truncate(u1, self.comptime_bits >> i) != 0; + return @as(u1, @truncate(self.comptime_bits >> i)) != 0; } pub fn paramIsNoalias(self: @This(), i: u5) bool { assert(i < self.param_types.len); - return @truncate(u1, self.noalias_bits >> i) != 0; + return @as(u1, @truncate(self.noalias_bits >> i)) != 0; } }; @@ -685,7 +685,7 @@ pub const Key = union(enum) { }; pub fn hash32(key: Key, ip: *const InternPool) u32 { - return @truncate(u32, key.hash64(ip)); + return @as(u32, @truncate(key.hash64(ip))); } pub fn hash64(key: Key, ip: *const InternPool) u64 { @@ -767,7 +767,7 @@ pub const Key = union(enum) { switch (float.storage) { inline else => |val| std.hash.autoHash( &hasher, - @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val), + @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)), ), } return hasher.final(); @@ -812,18 +812,18 @@ pub const Key = union(enum) { if (child == .u8_type) { switch (aggregate.storage) { - .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { + .bytes => |bytes| for (bytes[0..@as(usize, @intCast(len))]) |byte| { std.hash.autoHash(&hasher, KeyTag.int); std.hash.autoHash(&hasher, byte); }, - .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { + .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| { const elem_key = ip.indexToKey(elem); std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); switch (elem_key) { .undef => {}, .int => |int| std.hash.autoHash( &hasher, - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, } @@ -837,7 +837,7 @@ pub const Key = union(enum) { .undef => {}, .int => |int| std.hash.autoHash( &hasher, - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, } @@ -849,7 
+849,7 @@ pub const Key = union(enum) { switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| + .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| std.hash.autoHash(&hasher, elem), .repeated_elem => |elem| { var remaining = len; @@ -1061,10 +1061,10 @@ pub const Key = union(enum) { // These are strange: we'll sometimes represent them as f128, even if the // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. const a_val = switch (a_info.storage) { - inline else => |val| @floatCast(f128, val), + inline else => |val| @as(f128, @floatCast(val)), }; const b_val = switch (b_info.storage) { - inline else => |val| @floatCast(f128, val), + inline else => |val| @as(f128, @floatCast(val)), }; return a_val == b_val; } @@ -1092,7 +1092,7 @@ pub const Key = union(enum) { const len = ip.aggregateTypeLen(a_info.ty); const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { - for (0..@intCast(usize, len)) |elem_index| { + for (0..@as(usize, @intCast(len))) |elem_index| { const a_elem = switch (a_info.storage) { .bytes => |bytes| ip.getIfExists(.{ .int = .{ .ty = .u8_type, @@ -1119,16 +1119,16 @@ pub const Key = union(enum) { const b_bytes = b_info.storage.bytes; return std.mem.eql( u8, - a_bytes[0..@intCast(usize, len)], - b_bytes[0..@intCast(usize, len)], + a_bytes[0..@as(usize, @intCast(len))], + b_bytes[0..@as(usize, @intCast(len))], ); }, .elems => |a_elems| { const b_elems = b_info.storage.elems; return std.mem.eql( Index, - a_elems[0..@intCast(usize, len)], - b_elems[0..@intCast(usize, len)], + a_elems[0..@as(usize, @intCast(len))], + b_elems[0..@as(usize, @intCast(len))], ); }, .repeated_elem => |a_elem| { @@ -2291,7 +2291,7 @@ pub const Alignment = enum(u6) { pub fn fromByteUnits(n: u64) Alignment { if (n == 0) return .none; assert(std.math.isPowerOfTwo(n)); - return 
@enumFromInt(Alignment, @ctz(n)); + return @as(Alignment, @enumFromInt(@ctz(n))); } pub fn fromNonzeroByteUnits(n: u64) Alignment { @@ -2368,11 +2368,11 @@ pub const PackedU64 = packed struct(u64) { b: u32, pub fn get(x: PackedU64) u64 { - return @bitCast(u64, x); + return @as(u64, @bitCast(x)); } pub fn init(x: u64) PackedU64 { - return @bitCast(PackedU64, x); + return @as(PackedU64, @bitCast(x)); } }; @@ -2435,14 +2435,14 @@ pub const Float64 = struct { pub fn get(self: Float64) f64 { const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); - return @bitCast(f64, int_bits); + return @as(f64, @bitCast(int_bits)); } fn pack(val: f64) Float64 { - const bits = @bitCast(u64, val); + const bits = @as(u64, @bitCast(val)); return .{ - .piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), }; } }; @@ -2457,15 +2457,15 @@ pub const Float80 = struct { const int_bits = @as(u80, self.piece0) | (@as(u80, self.piece1) << 32) | (@as(u80, self.piece2) << 64); - return @bitCast(f80, int_bits); + return @as(f80, @bitCast(int_bits)); } fn pack(val: f80) Float80 { - const bits = @bitCast(u80, val); + const bits = @as(u80, @bitCast(val)); return .{ - .piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), - .piece2 = @truncate(u16, bits >> 64), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), + .piece2 = @as(u16, @truncate(bits >> 64)), }; } }; @@ -2482,16 +2482,16 @@ pub const Float128 = struct { (@as(u128, self.piece1) << 32) | (@as(u128, self.piece2) << 64) | (@as(u128, self.piece3) << 96); - return @bitCast(f128, int_bits); + return @as(f128, @bitCast(int_bits)); } fn pack(val: f128) Float128 { - const bits = @bitCast(u128, val); + const bits = @as(u128, @bitCast(val)); return .{ - .piece0 = @truncate(u32, bits), - .piece1 = @truncate(u32, bits >> 32), - .piece2 = @truncate(u32, bits >> 64), - .piece3 = 
@truncate(u32, bits >> 96), + .piece0 = @as(u32, @truncate(bits)), + .piece1 = @as(u32, @truncate(bits >> 32)), + .piece2 = @as(u32, @truncate(bits >> 64)), + .piece3 = @as(u32, @truncate(bits >> 96)), }; } }; @@ -2575,13 +2575,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_int_signed => .{ .int_type = .{ .signedness = .signed, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), }, }, .type_int_unsigned => .{ .int_type = .{ .signedness = .unsigned, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), }, }, .type_array_big => { @@ -2600,8 +2600,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple_type => .{ .simple_type = @enumFromInt(SimpleType, data) }, - .simple_value => .{ .simple_value = @enumFromInt(SimpleValue, data) }, + .simple_type => .{ .simple_type = @as(SimpleType, @enumFromInt(data)) }, + .simple_value => .{ .simple_value = @as(SimpleValue, @enumFromInt(data)) }, .type_vector => { const vector_info = ip.extraData(Vector, data); @@ -2620,8 +2620,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .ptr_type = ptr_info }; }, - .type_optional => .{ .opt_type = @enumFromInt(Index, data) }, - .type_anyframe => .{ .anyframe_type = @enumFromInt(Index, data) }, + .type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) }, + .type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) }, .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, .type_error_set => { @@ -2629,17 +2629,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const names_len = error_set.data.names_len; const names = ip.extra.items[error_set.end..][0..names_len]; return .{ .error_set_type = .{ - .names = @ptrCast([]const NullTerminatedString, names), + .names = @as([]const NullTerminatedString, @ptrCast(names)), .names_map = error_set.data.names_map.toOptional(), } }; }, .type_inferred_error_set => .{ - 
.inferred_error_set_type = @enumFromInt(Module.Fn.InferredErrorSet.Index, data), + .inferred_error_set_type = @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(data)), }, .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { - const struct_index = @enumFromInt(Module.Struct.OptionalIndex, data); + const struct_index = @as(Module.Struct.OptionalIndex, @enumFromInt(data)); const namespace = if (struct_index.unwrap()) |i| ip.structPtrConst(i).namespace.toOptional() else @@ -2651,7 +2651,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .type_struct_ns => .{ .struct_type = .{ .index = .none, - .namespace = @enumFromInt(Module.Namespace.Index, data).toOptional(), + .namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(), } }, .type_struct_anon => { @@ -2661,9 +2661,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @ptrCast([]const Index, types), - .values = @ptrCast([]const Index, values), - .names = @ptrCast([]const NullTerminatedString, names), + .types = @as([]const Index, @ptrCast(types)), + .values = @as([]const Index, @ptrCast(values)), + .names = @as([]const NullTerminatedString, @ptrCast(names)), } }; }, .type_tuple_anon => { @@ -2672,30 +2672,30 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @ptrCast([]const Index, types), - .values = @ptrCast([]const Index, values), + .types = @as([]const Index, @ptrCast(types)), + .values = @as([]const Index, @ptrCast(values)), .names = &.{}, } }; }, .type_union_untagged => .{ .union_type = .{ - .index = 
@enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .none, } }, .type_union_tagged => .{ .union_type = .{ - .index = @enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .tagged, } }, .type_union_safety => .{ .union_type = .{ - .index = @enumFromInt(Module.Union.Index, data), + .index = @as(Module.Union.Index, @enumFromInt(data)), .runtime_tag = .safety, } }, .type_enum_auto => { const enum_auto = ip.extraDataTrail(EnumAuto, data); - const names = @ptrCast( + const names = @as( []const NullTerminatedString, - ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len], + @ptrCast(ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len]), ); return .{ .enum_type = .{ .decl = enum_auto.data.decl, @@ -2712,10 +2712,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, - .undef => .{ .undef = @enumFromInt(Index, data) }, + .undef => .{ .undef = @as(Index, @enumFromInt(data)) }, .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, .opt_null => .{ .opt = .{ - .ty = @enumFromInt(Index, data), + .ty = @as(Index, @enumFromInt(data)), .val = .none, } }, .opt_payload => { @@ -2877,7 +2877,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .int_i32 => .{ .int = .{ .ty = .i32_type, - .storage = .{ .i64 = @bitCast(i32, data) }, + .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, .int_usize => .{ .int = .{ .ty = .usize_type, @@ -2889,7 +2889,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .int_comptime_int_i32 => .{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .i64 = @bitCast(i32, data) }, + .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, .int_positive => ip.indexToKeyBigInt(data, true), .int_negative => ip.indexToKeyBigInt(data, 
false), @@ -2913,11 +2913,11 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .float_f16 => .{ .float = .{ .ty = .f16_type, - .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, + .storage = .{ .f16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) }, } }, .float_f32 => .{ .float = .{ .ty = .f32_type, - .storage = .{ .f32 = @bitCast(f32, data) }, + .storage = .{ .f32 = @as(f32, @bitCast(data)) }, } }, .float_f64 => .{ .float = .{ .ty = .f64_type, @@ -2959,13 +2959,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, .func => .{ .func = ip.extraData(Tag.Func, data) }, .only_possible_value => { - const ty = @enumFromInt(Index, data); + const ty = @as(Index, @enumFromInt(data)); const ty_item = ip.items.get(@intFromEnum(ty)); return switch (ty_item.tag) { .type_array_big => { - const sentinel = @ptrCast( + const sentinel = @as( *const [1]Index, - &ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?], + @ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), ); return .{ .aggregate = .{ .ty = ty, @@ -2994,7 +2994,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .aggregate = .{ .ty = ty, - .storage = .{ .elems = @ptrCast([]const Index, values) }, + .storage = .{ .elems = @as([]const Index, @ptrCast(values)) }, } }; }, @@ -3010,7 +3010,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .bytes => { const extra = ip.extraData(Bytes, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty)); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty))); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] }, @@ -3018,8 +3018,8 @@ pub fn indexToKey(ip: *const InternPool, index: 
Index) Key { }, .aggregate => { const extra = ip.extraDataTrail(Tag.Aggregate, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); - const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty))); + const fields = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..len])); return .{ .aggregate = .{ .ty = extra.data.ty, .storage = .{ .elems = fields }, @@ -3048,14 +3048,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = .{ .payload = extra.val }, } }; }, - .enum_literal => .{ .enum_literal = @enumFromInt(NullTerminatedString, data) }, + .enum_literal => .{ .enum_literal = @as(NullTerminatedString, @enumFromInt(data)) }, .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, .memoized_call => { const extra = ip.extraDataTrail(MemoizedCall, data); return .{ .memoized_call = .{ .func = extra.data.func, - .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .arg_values = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len])), .result = extra.data.result, } }; }, @@ -3064,9 +3064,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { const type_function = ip.extraDataTrail(TypeFunction, data); - const param_types = @ptrCast( + const param_types = @as( []Index, - ip.extra.items[type_function.end..][0..type_function.data.params_len], + @ptrCast(ip.extra.items[type_function.end..][0..type_function.data.params_len]), ); return .{ .param_types = param_types, @@ -3087,13 +3087,13 @@ fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); - const names = @ptrCast( + const names = @as( []const 
NullTerminatedString, - ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + @ptrCast(ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len]), ); - const values = if (enum_explicit.data.values_map != .none) @ptrCast( + const values = if (enum_explicit.data.values_map != .none) @as( []const Index, - ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + @ptrCast(ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len]), ) else &[0]Index{}; return .{ .enum_type = .{ @@ -3122,7 +3122,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @enumFromInt(Index, gop.index); + if (gop.found_existing) return @as(Index, @enumFromInt(gop.index)); try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -3150,7 +3150,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .type_slice, .data = @intFromEnum(ptr_type_index), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } var ptr_type_adjusted = ptr_type; @@ -3174,7 +3174,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = array_type.child, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } } @@ -3223,7 +3223,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); const names_map = try ip.addMap(gpa); try addStringsToMap(ip, gpa, names_map, error_set_type.names); - const names_len = @intCast(u32, error_set_type.names.len); + const 
names_len = @as(u32, @intCast(error_set_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); ip.items.appendAssumeCapacity(.{ .tag = .type_error_set, @@ -3232,7 +3232,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names))); }, .inferred_error_set_type => |ies_index| { ip.items.appendAssumeCapacity(.{ @@ -3284,7 +3284,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(anon_struct_type.types.len == anon_struct_type.values.len); for (anon_struct_type.types) |elem| assert(elem != .none); - const fields_len = @intCast(u32, anon_struct_type.types.len); + const fields_len = @as(u32, @intCast(anon_struct_type.types.len)); if (anon_struct_type.names.len == 0) { try ip.extra.ensureUnusedCapacity( gpa, @@ -3296,9 +3296,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values))); + return @as(Index, @enumFromInt(ip.items.len - 1)); } assert(anon_struct_type.names.len == anon_struct_type.types.len); @@ -3313,10 +3313,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); - 
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.names))); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, .union_type => |union_type| { @@ -3348,7 +3348,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const names_map = try ip.addMap(gpa); try addStringsToMap(ip, gpa, names_map, enum_type.names); - const fields_len = @intCast(u32, enum_type.names.len); + const fields_len = @as(u32, @intCast(enum_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + fields_len); ip.items.appendAssumeCapacity(.{ @@ -3361,8 +3361,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .fields_len = fields_len, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit), .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive), @@ -3373,7 +3373,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(func_type.return_type != .none); for (func_type.param_types) |param_type| assert(param_type != .none); - const params_len = @intCast(u32, func_type.param_types.len); + const params_len = @as(u32, @intCast(func_type.param_types.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + params_len); @@ -3397,7 +3397,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: 
Key) Allocator.Error!Index { }, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(func_type.param_types))); }, .variable => |variable| { @@ -3559,7 +3559,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, } - assert(ptr.ty == ip.indexToKey(@enumFromInt(Index, ip.items.len - 1)).ptr.ty); + assert(ptr.ty == ip.indexToKey(@as(Index, @enumFromInt(ip.items.len - 1))).ptr.ty); }, .opt => |opt| { @@ -3593,7 +3593,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .lazy_ty = lazy_ty, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, } switch (int.ty) { @@ -3608,7 +3608,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u8, - .data = @intCast(u8, x), + .data = @as(u8, @intCast(x)), }); break :b; }, @@ -3625,7 +3625,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u16, - .data = @intCast(u16, x), + .data = @as(u16, @intCast(x)), }); break :b; }, @@ -3642,7 +3642,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_u32, - .data = @intCast(u32, x), + .data = @as(u32, @intCast(x)), }); break :b; }, @@ -3653,14 +3653,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const casted = big_int.to(i32) catch unreachable; ip.items.appendAssumeCapacity(.{ .tag = .int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; }, inline .u64, .i64 => |x| { ip.items.appendAssumeCapacity(.{ .tag = .int_i32, - .data = @bitCast(u32, @intCast(i32, x)), + .data = @as(u32, @bitCast(@as(i32, 
@intCast(x)))), }); break :b; }, @@ -3699,7 +3699,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (big_int.to(i32)) |casted| { ip.items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; } else |_| {} @@ -3715,7 +3715,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (std.math.cast(i32, x)) |casted| { ip.items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, - .data = @bitCast(u32, casted), + .data = @as(u32, @bitCast(casted)), }); break :b; } @@ -3734,7 +3734,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .value = casted, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -3749,7 +3749,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .value = casted, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } var buf: [2]Limb = undefined; @@ -3816,11 +3816,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f16, - .data = @bitCast(u16, float.storage.f16), + .data = @as(u16, @bitCast(float.storage.f16)), }), .f32_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f32, - .data = @bitCast(u32, float.storage.f32), + .data = @as(u32, @bitCast(float.storage.f32)), }), .f64_type => ip.items.appendAssumeCapacity(.{ .tag = .float_f64, @@ -3872,13 +3872,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(child == .u8_type); if (bytes.len != len) { assert(bytes.len == len_including_sentinel); - assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); + 
assert(bytes[@as(usize, @intCast(len))] == ip.indexToKey(sentinel).int.storage.u64); } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); - assert(elems[@intCast(usize, len)] == sentinel); + assert(elems[@as(usize, @intCast(len))] == sentinel); } }, .repeated_elem => |elem| { @@ -3912,7 +3912,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } switch (ty_key) { @@ -3940,16 +3940,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); }, else => {}, } repeated: { switch (aggregate.storage) { - .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte| + .bytes => |bytes| for (bytes[1..@as(usize, @intCast(len))]) |byte| if (byte != bytes[0]) break :repeated, - .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem| + .elems => |elems| for (elems[1..@as(usize, @intCast(len))]) |elem| if (elem != elems[0]) break :repeated, .repeated_elem => {}, } @@ -3979,12 +3979,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .elem_val = elem, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } if (child == .u8_type) bytes: { const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); + try ip.string_bytes.ensureUnusedCapacity(gpa, @as(usize, @intCast(len_including_sentinel + 1))); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), @@ 
-3994,15 +3994,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :bytes; }, .int => |int| ip.string_bytes.appendAssumeCapacity( - @intCast(u8, int.storage.u64), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, }, .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), - @intCast(u8, int.storage.u64), + ip.string_bytes.addManyAsSliceAssumeCapacity(@as(usize, @intCast(len))), + @as(u8, @intCast(int.storage.u64)), ), else => unreachable, }, @@ -4010,12 +4010,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const has_internal_null = std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( - @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), + @as(u8, @intCast(ip.indexToKey(sentinel).int.storage.u64)), ); const string = if (has_internal_null) - @enumFromInt(String, string_bytes_index) + @as(String, @enumFromInt(string_bytes_index)) else - (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); + (try ip.getOrPutTrailingString(gpa, @as(usize, @intCast(len_including_sentinel)))).toString(); ip.items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ @@ -4023,12 +4023,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .bytes = string, }), }); - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } try ip.extra.ensureUnusedCapacity( gpa, - @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), + @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel)), ); ip.items.appendAssumeCapacity(.{ .tag = .aggregate, @@ -4036,7 +4036,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) 
Allocator.Error!Index { .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(aggregate.storage.elems))); if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel)); }, @@ -4058,14 +4058,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .memoized_call, .data = ip.addExtraAssumeCapacity(MemoizedCall{ .func = memoized_call.func, - .args_len = @intCast(u32, memoized_call.arg_values.len), + .args_len = @as(u32, @intCast(memoized_call.arg_values.len)), .result = memoized_call.result, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(memoized_call.arg_values))); }, } - return @enumFromInt(Index, ip.items.len - 1); + return @as(Index, @enumFromInt(ip.items.len - 1)); } /// Provides API for completing an enum type after calling `getIncompleteEnum`. 
@@ -4093,10 +4093,10 @@ pub const IncompleteEnumType = struct { const field_index = map.count(); const strings = ip.extra.items[self.names_start..][0..field_index]; const adapter: NullTerminatedString.Adapter = .{ - .strings = @ptrCast([]const NullTerminatedString, strings), + .strings = @as([]const NullTerminatedString, @ptrCast(strings)), }; const gop = try map.getOrPutAdapted(gpa, name, adapter); - if (gop.found_existing) return @intCast(u32, gop.index); + if (gop.found_existing) return @as(u32, @intCast(gop.index)); ip.extra.items[self.names_start + field_index] = @intFromEnum(name); return null; } @@ -4109,15 +4109,15 @@ pub const IncompleteEnumType = struct { gpa: Allocator, value: Index, ) Allocator.Error!?u32 { - assert(ip.typeOf(value) == @enumFromInt(Index, ip.extra.items[self.tag_ty_index])); + assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index]))); const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)]; const field_index = map.count(); const indexes = ip.extra.items[self.values_start..][0..field_index]; const adapter: Index.Adapter = .{ - .indexes = @ptrCast([]const Index, indexes), + .indexes = @as([]const Index, @ptrCast(indexes)), }; const gop = try map.getOrPutAdapted(gpa, value, adapter); - if (gop.found_existing) return @intCast(u32, gop.index); + if (gop.found_existing) return @as(u32, @intCast(gop.index)); ip.extra.items[self.values_start + field_index] = @intFromEnum(value); return null; } @@ -4177,7 +4177,7 @@ fn getIncompleteEnumAuto( }); ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len); return .{ - .index = @enumFromInt(Index, ip.items.len - 1), + .index = @as(Index, @enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .names_map = names_map, .names_start = extra_index + extra_fields_len, @@ -4228,7 +4228,7 @@ fn getIncompleteEnumExplicit( // This is both fields and values (if present). 
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len); return .{ - .index = @enumFromInt(Index, ip.items.len - 1), + .index = @as(Index, @enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, .names_map = names_map, .names_start = extra_index + extra_fields_len, @@ -4251,7 +4251,7 @@ pub fn finishGetEnum( try addIndexesToMap(ip, gpa, values_map, enum_type.values); break :m values_map.toOptional(); }; - const fields_len = @intCast(u32, enum_type.names.len); + const fields_len = @as(u32, @intCast(enum_type.names.len)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + fields_len); ip.items.appendAssumeCapacity(.{ @@ -4265,15 +4265,15 @@ pub fn finishGetEnum( .values_map = values_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); - return @enumFromInt(Index, ip.items.len - 1); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); + ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values))); + return @as(Index, @enumFromInt(ip.items.len - 1)); } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @enumFromInt(Index, index); + return @as(Index, @enumFromInt(index)); } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -4311,7 +4311,7 @@ fn addIndexesToMap( fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { const ptr = try ip.maps.addOne(gpa); ptr.* = .{}; - return @enumFromInt(MapIndex, ip.maps.items.len - 1); + return @as(MapIndex, @enumFromInt(ip.maps.items.len - 1)); } /// This operation only happens under compile error conditions. 
@@ -4320,7 +4320,7 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { - const limbs_len = @intCast(u32, limbs.len); + const limbs_len = @as(u32, @intCast(limbs.len)); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); ip.items.appendAssumeCapacity(.{ .tag = tag, @@ -4339,7 +4339,7 @@ fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 } fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const result = @intCast(u32, ip.extra.items.len); + const result = @as(u32, @intCast(ip.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), @@ -4354,12 +4354,12 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { String => @intFromEnum(@field(extra, field.name)), NullTerminatedString => @intFromEnum(@field(extra, field.name)), OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), - Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)), - TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), - Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), + Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))), + TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))), Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)), - Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))), else => 
@compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4380,7 +4380,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { @sizeOf(u64) => {}, else => @compileError("unsupported host"), } - const result = @intCast(u32, ip.limbs.items.len); + const result = @as(u32, @intCast(ip.limbs.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| { const new: u32 = switch (field.type) { u32 => @field(extra, field.name), @@ -4411,23 +4411,23 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { u32 => int32, - Index => @enumFromInt(Index, int32), - Module.Decl.Index => @enumFromInt(Module.Decl.Index, int32), - Module.Namespace.Index => @enumFromInt(Module.Namespace.Index, int32), - Module.Namespace.OptionalIndex => @enumFromInt(Module.Namespace.OptionalIndex, int32), - Module.Fn.Index => @enumFromInt(Module.Fn.Index, int32), - MapIndex => @enumFromInt(MapIndex, int32), - OptionalMapIndex => @enumFromInt(OptionalMapIndex, int32), - RuntimeIndex => @enumFromInt(RuntimeIndex, int32), - String => @enumFromInt(String, int32), - NullTerminatedString => @enumFromInt(NullTerminatedString, int32), - OptionalNullTerminatedString => @enumFromInt(OptionalNullTerminatedString, int32), - i32 => @bitCast(i32, int32), - Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32), - TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), - Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32), - Tag.TypePointer.VectorIndex => @enumFromInt(Tag.TypePointer.VectorIndex, int32), - Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), + Index => @as(Index, @enumFromInt(int32)), + Module.Decl.Index => @as(Module.Decl.Index, @enumFromInt(int32)), + Module.Namespace.Index => @as(Module.Namespace.Index, @enumFromInt(int32)), + Module.Namespace.OptionalIndex => 
@as(Module.Namespace.OptionalIndex, @enumFromInt(int32)), + Module.Fn.Index => @as(Module.Fn.Index, @enumFromInt(int32)), + MapIndex => @as(MapIndex, @enumFromInt(int32)), + OptionalMapIndex => @as(OptionalMapIndex, @enumFromInt(int32)), + RuntimeIndex => @as(RuntimeIndex, @enumFromInt(int32)), + String => @as(String, @enumFromInt(int32)), + NullTerminatedString => @as(NullTerminatedString, @enumFromInt(int32)), + OptionalNullTerminatedString => @as(OptionalNullTerminatedString, @enumFromInt(int32)), + i32 => @as(i32, @bitCast(int32)), + Tag.TypePointer.Flags => @as(Tag.TypePointer.Flags, @bitCast(int32)), + TypeFunction.Flags => @as(TypeFunction.Flags, @bitCast(int32)), + Tag.TypePointer.PackedOffset => @as(Tag.TypePointer.PackedOffset, @bitCast(int32)), + Tag.TypePointer.VectorIndex => @as(Tag.TypePointer.VectorIndex, @enumFromInt(int32)), + Tag.Variable.Flags => @as(Tag.Variable.Flags, @bitCast(int32)), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -4452,13 +4452,13 @@ fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { inline for (@typeInfo(T).Struct.fields, 0..) 
|field, i| { const host_int = ip.limbs.items[index + i / 2]; const int32 = if (i % 2 == 0) - @truncate(u32, host_int) + @as(u32, @truncate(host_int)) else - @truncate(u32, host_int >> 32); + @as(u32, @truncate(host_int >> 32)); @field(result, field.name) = switch (field.type) { u32 => int32, - Index => @enumFromInt(Index, int32), + Index => @as(Index, @enumFromInt(int32)), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -4494,8 +4494,8 @@ fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes }; // TODO: https://github.com/ziglang/zig/issues/1738 return .{ - .start = @intCast(u32, @divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))), - .len = @intCast(u32, limbs.len), + .start = @as(u32, @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb)))), + .len = @as(u32, @intCast(limbs.len)), }; } @@ -4557,7 +4557,7 @@ pub fn slicePtrType(ip: *const InternPool, i: Index) Index { } const item = ip.items.get(@intFromEnum(i)); switch (item.tag) { - .type_slice => return @enumFromInt(Index, item.data), + .type_slice => return @as(Index, @enumFromInt(item.data)), else => unreachable, // not a slice type } } @@ -4727,7 +4727,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .val = error_union.val, } }), .aggregate => |aggregate| { - const new_len = @intCast(usize, ip.aggregateTypeLen(new_ty)); + const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty))); direct: { const old_ty_child = switch (ip.indexToKey(old_ty)) { inline .array_type, .vector_type => |seq_type| seq_type.child, @@ -4862,7 +4862,7 @@ pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.Option const tags = ip.items.items(.tag); if (tags[@intFromEnum(val)] != .type_struct) return .none; const datas = ip.items.items(.data); - return @enumFromInt(Module.Struct.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Struct.Index, 
@enumFromInt(datas[@intFromEnum(val)])).toOptional(); } pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { @@ -4873,7 +4873,7 @@ pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.Optional else => return .none, } const datas = ip.items.items(.data); - return @enumFromInt(Module.Union.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Union.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { @@ -4899,7 +4899,7 @@ pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn. const tags = ip.items.items(.tag); if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none; const datas = ip.items.items(.data); - return @enumFromInt(Module.Fn.InferredErrorSet.Index, datas[@intFromEnum(val)]).toOptional(); + return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); } /// includes .comptime_int_type @@ -5057,7 +5057,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => b: { - const struct_index = @enumFromInt(Module.Struct.Index, data); + const struct_index = @as(Module.Struct.Index, @enumFromInt(data)); const struct_obj = ip.structPtrConst(struct_index); break :b @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @@ -5124,13 +5124,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .bytes => b: { const info = ip.extraData(Bytes, data); - const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); break :b @sizeOf(Bytes) + len + @intFromBool(ip.string_bytes.items[@intFromEnum(info.bytes) + len - 1] != 0); }, .aggregate => b: { const info = ip.extraData(Tag.Aggregate, data); - const fields_len = 
@intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + const fields_len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); }, .repeated => @sizeOf(Repeated), @@ -5181,8 +5181,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { for (tags, datas, 0..) |tag, data, i| { try w.print("${d} = {s}(", .{ i, @tagName(tag) }); switch (tag) { - .simple_type => try w.print("{s}", .{@tagName(@enumFromInt(SimpleType, data))}), - .simple_value => try w.print("{s}", .{@tagName(@enumFromInt(SimpleValue, data))}), + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), .type_int_signed, .type_int_unsigned, @@ -5311,7 +5311,7 @@ pub fn createStruct( } const ptr = try ip.allocated_structs.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Struct.Index, ip.allocated_structs.len - 1); + return @as(Module.Struct.Index, @enumFromInt(ip.allocated_structs.len - 1)); } pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void { @@ -5333,7 +5333,7 @@ pub fn createUnion( } const ptr = try ip.allocated_unions.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Union.Index, ip.allocated_unions.len - 1); + return @as(Module.Union.Index, @enumFromInt(ip.allocated_unions.len - 1)); } pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void { @@ -5355,7 +5355,7 @@ pub fn createFunc( } const ptr = try ip.allocated_funcs.addOne(gpa); ptr.* = initialization; - return @enumFromInt(Module.Fn.Index, ip.allocated_funcs.len - 1); + return @as(Module.Fn.Index, @enumFromInt(ip.allocated_funcs.len - 1)); } pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { @@ -5377,7 +5377,7 @@ pub fn createInferredErrorSet( } const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); 
ptr.* = initialization; - return @enumFromInt(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); + return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1)); } pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { @@ -5406,7 +5406,7 @@ pub fn getOrPutStringFmt( args: anytype, ) Allocator.Error!NullTerminatedString { // ensure that references to string_bytes in args do not get invalidated - const len = @intCast(usize, std.fmt.count(format, args) + 1); + const len = @as(usize, @intCast(std.fmt.count(format, args) + 1)); try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); @@ -5430,7 +5430,7 @@ pub fn getOrPutTrailingString( len: usize, ) Allocator.Error!NullTerminatedString { const string_bytes = &ip.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len - len); + const str_index = @as(u32, @intCast(string_bytes.items.len - len)); if (len > 0 and string_bytes.getLast() == 0) { _ = string_bytes.pop(); } else { @@ -5444,11 +5444,11 @@ pub fn getOrPutTrailingString( }); if (gop.found_existing) { string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(NullTerminatedString, gop.key_ptr.*); + return @as(NullTerminatedString, @enumFromInt(gop.key_ptr.*)); } else { gop.key_ptr.* = str_index; string_bytes.appendAssumeCapacity(0); - return @enumFromInt(NullTerminatedString, str_index); + return @as(NullTerminatedString, @enumFromInt(str_index)); } } @@ -5456,7 +5456,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ .bytes = &ip.string_bytes, })) |index| { - return @enumFromInt(NullTerminatedString, index).toOptional(); + return @as(NullTerminatedString, @enumFromInt(index)).toOptional(); } else { return 
.none; } @@ -5596,7 +5596,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef, .opt_null, .only_possible_value, - => @enumFromInt(Index, ip.items.items(.data)[@intFromEnum(index)]), + => @as(Index, @enumFromInt(ip.items.items(.data)[@intFromEnum(index)])), .simple_value => unreachable, // handled via Index above @@ -5628,7 +5628,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { => |t| { const extra_index = ip.items.items(.data)[@intFromEnum(index)]; const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; - return @enumFromInt(Index, ip.extra.items[extra_index + field_index]); + return @as(Index, @enumFromInt(ip.extra.items[extra_index + field_index])); }, .int_u8 => .u8_type, @@ -5670,7 +5670,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { /// Assumes that the enum's field indexes equal its value tags. pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; - return @enumFromInt(E, ip.indexToKey(int).int.storage.u64); + return @as(E, @enumFromInt(ip.indexToKey(int).int.storage.u64)); } pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { @@ -5703,9 +5703,9 @@ pub fn funcReturnType(ip: *const InternPool, ty: Index) Index { else => unreachable, }; assert(child_item.tag == .type_function); - return @enumFromInt(Index, ip.extra.items[ + return @as(Index, @enumFromInt(ip.extra.items[ child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").? - ]); + ])); } pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { @@ -5736,9 +5736,9 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) Module.Decl.OptionalInd switch (ip.items.items(.tag)[base]) { inline .ptr_decl, .ptr_mut_decl, - => |tag| return @enumFromInt(Module.Decl.OptionalIndex, ip.extra.items[ + => |tag| return @as(Module.Decl.OptionalIndex, @enumFromInt(ip.extra.items[ ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").? 
- ]), + ])), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, diff --git a/src/Liveness.zig b/src/Liveness.zig index 1141b8620c9a..ab7c61275805 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -178,14 +178,14 @@ pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocat pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { const usize_index = (inst * bpi) / @bitSizeOf(usize); - return @truncate(Bpi, l.tomb_bits[usize_index] >> - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); + return @as(Bpi, @truncate(l.tomb_bits[usize_index] >> + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)))); } pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1))); return (l.tomb_bits[usize_index] & mask) != 0; } @@ -193,7 +193,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand)); return (l.tomb_bits[usize_index] & mask) != 0; } @@ -201,7 +201,7 @@ pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand)); l.tomb_bits[usize_index] &= ~mask; } @@ -484,11 +484,11 @@ pub fn categorizeOperand( const inst_data = air_datas[inst].pl_op; const callee = inst_data.operand; 
const extra = air.extraData(Air.Call, inst_data.payload); - const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra[extra.end..][0..extra.data.args_len])); if (args.len + 1 <= bpi - 1) { if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); for (args, 0..) |arg, i| { - if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write); + if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write); } return .write; } @@ -535,12 +535,12 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra[ty_pl.payload..][0..len])); if (elements.len <= bpi - 1) { for (elements, 0..) 
|elem, i| { - if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none); + if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none); } return .none; } @@ -808,20 +808,20 @@ pub const BigTomb = struct { const small_tombs = bpi - 1; if (this_bit_index < small_tombs) { - const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0; + const dies = @as(u1, @truncate(bt.tomb_bits >> @as(Liveness.OperandInt, @intCast(this_bit_index)))) != 0; return dies; } const big_bit_index = this_bit_index - small_tombs; while (big_bit_index - bt.extra_offset * 31 >= 31) { - if (@truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> 31) != 0) { + if (@as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> 31)) != 0) { bt.reached_end = true; return false; } bt.extra_offset += 1; } - const dies = @truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> - @intCast(u5, big_bit_index - bt.extra_offset * 31)) != 0; + const dies = @as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> + @as(u5, @intCast(big_bit_index - bt.extra_offset * 31)))) != 0; return dies; } }; @@ -838,7 +838,7 @@ const Analysis = struct { fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -849,7 +849,7 @@ const Analysis = struct { fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, a.extra.items.len); + const result = @as(u32, @intCast(a.extra.items.len)); inline for (fields) |field| { a.extra.appendAssumeCapacity(switch (field.type) { u32 => 
@field(extra, field.name), @@ -1108,7 +1108,7 @@ fn analyzeInst( const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); - const args = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra.end..][0..extra.data.args_len])); if (args.len + 1 <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; @@ -1146,8 +1146,8 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[ty_pl.payload..][0..len])); if (elements.len <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); @@ -1200,9 +1200,9 @@ fn analyzeInst( .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const num_operands = simple: { @@ -1310,7 +1310,7 @@ fn analyzeOperands( // Don't compute any liveness for constants if (inst_tags[operand] == .interned) continue; - const mask = @as(Bpi, 1) << @intCast(OperandInt, i); + const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i)); if ((try data.live_set.fetchPut(gpa, operand, {})) == null) { log.debug("[{}] 
%{}: added %{} to live set (operand dies here)", .{ pass, inst, operand }); @@ -1320,7 +1320,7 @@ fn analyzeOperands( } a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << - @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); + @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi)); }, } } @@ -1472,7 +1472,7 @@ fn analyzeInstLoop( const num_breaks = data.breaks.count(); try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks); - const extra_index = @intCast(u32, a.extra.items.len); + const extra_index = @as(u32, @intCast(a.extra.items.len)); a.extra.appendAssumeCapacity(num_breaks); var it = data.breaks.keyIterator(); @@ -1523,7 +1523,7 @@ fn analyzeInstLoop( // This is necessarily not in the same control flow branch, because loops are noreturn data.live_set.clearRetainingCapacity(); - try data.live_set.ensureUnusedCapacity(gpa, @intCast(u32, loop_live.len)); + try data.live_set.ensureUnusedCapacity(gpa, @as(u32, @intCast(loop_live.len))); for (loop_live) |alive| { data.live_set.putAssumeCapacity(alive, {}); } @@ -1647,8 +1647,8 @@ fn analyzeInstCondBr( log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); // Write the mirrored deaths to `extra` - const then_death_count = @intCast(u32, then_mirrored_deaths.items.len); - const else_death_count = @intCast(u32, else_mirrored_deaths.items.len); + const then_death_count = @as(u32, @intCast(then_mirrored_deaths.items.len)); + const else_death_count = @as(u32, @intCast(else_mirrored_deaths.items.len)); try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -1758,12 +1758,12 @@ fn analyzeInstSwitchBr( log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); } - const else_death_count = @intCast(u32, mirrored_deaths[ncases].items.len); + const else_death_count = @as(u32, 
@intCast(mirrored_deaths[ncases].items.len)); const extra_index = try a.addExtra(SwitchBr{ .else_death_count = else_death_count, }); for (mirrored_deaths[0..ncases]) |mirrored| { - const num = @intCast(u32, mirrored.items.len); + const num = @as(u32, @intCast(mirrored.items.len)); try a.extra.ensureUnusedCapacity(gpa, num + 1); a.extra.appendAssumeCapacity(num); a.extra.appendSliceAssumeCapacity(mirrored.items); @@ -1798,7 +1798,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { inst: Air.Inst.Index, total_operands: usize, ) !Self { - const extra_operands = @intCast(u32, total_operands) -| (bpi - 1); + const extra_operands = @as(u32, @intCast(total_operands)) -| (bpi - 1); const max_extra_tombs = (extra_operands + 30) / 31; const extra_tombs: []u32 = switch (pass) { @@ -1818,7 +1818,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { .a = a, .data = data, .inst = inst, - .operands_remaining = @intCast(u32, total_operands), + .operands_remaining = @as(u32, @intCast(total_operands)), .extra_tombs = extra_tombs, .will_die_immediately = will_die_immediately, }; @@ -1847,7 +1847,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; - const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); + const extra_bit = @as(u5, @intCast(big.operands_remaining - (bpi - 1) - extra_byte * 31)); const gpa = big.a.gpa; @@ -1881,7 +1881,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // keep at least one. 
var num: usize = big.extra_tombs.len; while (num > 1) { - if (@truncate(u31, big.extra_tombs[num - 1]) != 0) { + if (@as(u31, @truncate(big.extra_tombs[num - 1])) != 0) { // Some operand dies here break; } @@ -1892,7 +1892,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { const extra_tombs = big.extra_tombs[0..num]; - const extra_index = @intCast(u32, big.a.extra.items.len); + const extra_index = @as(u32, @intCast(big.a.extra.items.len)); try big.a.extra.appendSlice(gpa, extra_tombs); try big.a.special.put(gpa, big.inst, extra_index); }, diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 904e38007353..128a2d69b789 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -325,8 +325,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); var bt = self.liveness.iterateBigTomb(inst); for (elements) |element| { @@ -337,9 +337,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast( + const args = @as( []const Air.Inst.Ref, - self.air.extra[extra.end..][0..extra.data.args_len], + @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]), ); var bt = self.liveness.iterateBigTomb(inst); @@ -353,14 +353,14 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); var extra_i = extra.end; - 
const outputs = @ptrCast( + const outputs = @as( []const Air.Inst.Ref, - self.air.extra[extra_i..][0..extra.data.outputs_len], + @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]), ); extra_i += outputs.len; - const inputs = @ptrCast( + const inputs = @as( []const Air.Inst.Ref, - self.air.extra[extra_i..][0..extra.data.inputs_len], + @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]), ); extra_i += inputs.len; @@ -521,9 +521,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast( + const items = @as( []const Air.Inst.Ref, - self.air.extra[case.end..][0..case.data.items_len], + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]), ); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -576,7 +576,7 @@ fn verifyInstOperands( operands: [Liveness.bpi - 1]Air.Inst.Ref, ) Error!void { for (operands, 0..) 
|operand, operand_index| { - const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index)); + const dies = self.liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(operand_index))); try self.verifyOperand(inst, operand, dies); } try self.verifyInst(inst); diff --git a/src/Manifest.zig b/src/Manifest.zig index 0549287e60d4..199663556d4e 100644 --- a/src/Manifest.zig +++ b/src/Manifest.zig @@ -102,7 +102,7 @@ pub fn hex64(x: u64) [16]u8 { var result: [16]u8 = undefined; var i: usize = 0; while (i < 8) : (i += 1) { - const byte = @truncate(u8, x >> @intCast(u6, 8 * i)); + const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i)))); result[i * 2 + 0] = hex_charset[byte >> 4]; result[i * 2 + 1] = hex_charset[byte & 15]; } @@ -284,7 +284,7 @@ const Parse = struct { @errorName(err), }); }; - if (@enumFromInt(MultihashFunction, their_multihash_func) != multihash_function) { + if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) { return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{}); } } @@ -345,7 +345,7 @@ const Parse = struct { .invalid_escape_character => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid escape character: '{c}'", .{raw_string[bad_index]}, ); @@ -353,7 +353,7 @@ const Parse = struct { .expected_hex_digit => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit, found '{c}'", .{raw_string[bad_index]}, ); @@ -361,7 +361,7 @@ const Parse = struct { .empty_unicode_escape_sequence => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "empty unicode escape sequence", .{}, ); @@ -369,7 +369,7 @@ const Parse = struct { .expected_hex_digit_or_rbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, 
bad_index), + offset + @as(u32, @intCast(bad_index)), "expected hex digit or '}}', found '{c}'", .{raw_string[bad_index]}, ); @@ -377,7 +377,7 @@ const Parse = struct { .invalid_unicode_codepoint => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "unicode escape does not correspond to a valid codepoint", .{}, ); @@ -385,7 +385,7 @@ const Parse = struct { .expected_lbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '{{', found '{c}", .{raw_string[bad_index]}, ); @@ -393,7 +393,7 @@ const Parse = struct { .expected_rbrace => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected '}}', found '{c}", .{raw_string[bad_index]}, ); @@ -401,7 +401,7 @@ const Parse = struct { .expected_single_quote => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "expected single quote ('), found '{c}", .{raw_string[bad_index]}, ); @@ -409,7 +409,7 @@ const Parse = struct { .invalid_character => |bad_index| { try p.appendErrorOff( token, - offset + @intCast(u32, bad_index), + offset + @as(u32, @intCast(bad_index)), "invalid byte in string or character literal: '{c}'", .{raw_string[bad_index]}, ); diff --git a/src/Module.zig b/src/Module.zig index 70b9c9bdbb88..f88f0475785b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -554,7 +554,7 @@ pub const Decl = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -563,12 +563,12 @@ pub const Decl = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub 
fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -619,7 +619,7 @@ pub const Decl = struct { pub fn contentsHashZir(decl: Decl, zir: Zir) std.zig.SrcHash { assert(decl.zir_decl_index != 0); const hash_u32s = zir.extra[decl.zir_decl_index..][0..4]; - const contents_hash = @bitCast(std.zig.SrcHash, hash_u32s.*); + const contents_hash = @as(std.zig.SrcHash, @bitCast(hash_u32s.*)); return contents_hash; } @@ -633,7 +633,7 @@ pub const Decl = struct { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; - return @enumFromInt(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[decl.zir_decl_index + 8])); } pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref { @@ -641,7 +641,7 @@ pub const Decl = struct { assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align); - return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref { @@ -649,7 +649,7 @@ pub const Decl = struct { assert(decl.zir_decl_index != 0); const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align) + 1; - return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } pub fn relativeToLine(decl: Decl, offset: u32) u32 { @@ -657,11 +657,11 @@ pub const Decl = struct { } pub fn relativeToNodeIndex(decl: Decl, offset: i32) Ast.Node.Index { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, decl.src_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(decl.src_node)))); } pub fn nodeIndexToRelative(decl: Decl, 
node_index: Ast.Node.Index) i32 { - return @bitCast(i32, node_index) - @bitCast(i32, decl.src_node); + return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(decl.src_node)); } pub fn tokSrcLoc(decl: Decl, token_index: Ast.TokenIndex) LazySrcLoc { @@ -864,7 +864,7 @@ pub const Decl = struct { pub fn getAlignment(decl: Decl, mod: *Module) u32 { assert(decl.has_tv); - return @intCast(u32, decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)); + return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod))); } pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void { @@ -922,7 +922,7 @@ pub const Struct = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -931,12 +931,12 @@ pub const Struct = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -964,7 +964,7 @@ pub const Struct = struct { ) u32 { if (field.abi_align.toByteUnitsOptional()) |abi_align| { assert(layout != .Packed); - return @intCast(u32, abi_align); + return @as(u32, @intCast(abi_align)); } const target = mod.getTarget(); @@ -1042,7 +1042,7 @@ pub const Struct = struct { var bit_sum: u64 = 0; for (s.fields.values(), 0..) 
|field, i| { if (i == index) { - return @intCast(u16, bit_sum); + return @as(u16, @intCast(bit_sum)); } bit_sum += field.ty.bitSize(mod); } @@ -1123,7 +1123,7 @@ pub const Union = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1132,12 +1132,12 @@ pub const Union = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1151,7 +1151,7 @@ pub const Union = struct { /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. pub fn normalAlignment(field: Field, mod: *Module) u32 { - return @intCast(u32, field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod)); + return @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod))); } }; @@ -1205,7 +1205,7 @@ pub const Union = struct { most_index = i; } } - return @intCast(u32, most_index); + return @as(u32, @intCast(most_index)); } /// Returns 0 if the union is represented with 0 bits at runtime. 
@@ -1267,11 +1267,11 @@ pub const Union = struct { const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; - biggest_field = @intCast(u32, i); + biggest_field = @as(u32, @intCast(i)); } if (field_align > payload_align) { - payload_align = @intCast(u32, field_align); - most_aligned_field = @intCast(u32, i); + payload_align = @as(u32, @intCast(field_align)); + most_aligned_field = @as(u32, @intCast(i)); most_aligned_field_size = field_size; } } @@ -1303,7 +1303,7 @@ pub const Union = struct { size += payload_size; const prev_size = size; size = std.mem.alignForward(u64, size, tag_align); - padding = @intCast(u32, size - prev_size); + padding = @as(u32, @intCast(size - prev_size)); } else { // {Payload, Tag} size += payload_size; @@ -1311,7 +1311,7 @@ pub const Union = struct { size += tag_size; const prev_size = size; size = std.mem.alignForward(u64, size, payload_align); - padding = @intCast(u32, size - prev_size); + padding = @as(u32, @intCast(size - prev_size)); } return .{ .abi_size = size, @@ -1409,7 +1409,7 @@ pub const Fn = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1418,12 +1418,12 @@ pub const Fn = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1477,7 +1477,7 @@ pub const Fn = struct { _, pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(i)); + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1486,12 +1486,12 @@ 
pub const Fn = struct { _, pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { if (oi == .none) return null; - return @enumFromInt(InferredErrorSet.Index, @intFromEnum(oi)); + return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1613,7 +1613,7 @@ pub const Namespace = struct { _, pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(i)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1622,12 +1622,12 @@ pub const Namespace = struct { _, pub fn init(oi: ?Index) OptionalIndex { - return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none)); + return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } pub fn unwrap(oi: OptionalIndex) ?Index { if (oi == .none) return null; - return @enumFromInt(Index, @intFromEnum(oi)); + return @as(Index, @enumFromInt(@intFromEnum(oi))); } }; @@ -1867,7 +1867,7 @@ pub const File = struct { if (stat.size > std.math.maxInt(u32)) return error.FileTooBig; - const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); defer if (!file.source_loaded) gpa.free(source); const amt = try f.readAll(source); if (amt != stat.size) @@ -2116,7 +2116,7 @@ pub const SrcLoc = struct { } pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node)))); } pub const Span = struct { @@ -2135,7 +2135,7 @@ pub const SrcLoc = struct { .token_abs => |tok_index| { const tree = try 
src_loc.file_scope.getTree(gpa); const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_abs => |node| { @@ -2146,14 +2146,14 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const tok_index = src_loc.declSrcToken(); const start = tree.tokens.items(.start)[tok_index] + byte_off; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .token_offset => |tok_off| { const tree = try src_loc.file_scope.getTree(gpa); const tok_index = src_loc.declSrcToken() + tok_off; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset => |traced_off| { @@ -2206,7 +2206,7 @@ pub const SrcLoc = struct { } const tok_index = full.ast.mut_token + 1; // the name token const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_var_decl_align => |node_off| { @@ -2292,7 +2292,7 @@ pub const SrcLoc = struct { else => tree.firstToken(node) - 2, }; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_deref_ptr => |node_off| { @@ -2359,7 +2359,7 @@ pub const SrcLoc = struct { // that 
contains this input. const node_tags = tree.nodes.items(.tag); for (node_tags, 0..) |node_tag, node_usize| { - const node = @intCast(Ast.Node.Index, node_usize); + const node = @as(Ast.Node.Index, @intCast(node_usize)); switch (node_tag) { .for_simple, .@"for" => { const for_full = tree.fullFor(node).?; @@ -2479,7 +2479,7 @@ pub const SrcLoc = struct { }; const start = tree.tokens.items(.start)[start_tok]; const end_start = tree.tokens.items(.start)[end_tok]; - const end = end_start + @intCast(u32, tree.tokenSlice(end_tok).len); + const end = end_start + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); return Span{ .start = start, .end = end, .main = start }; }, .node_offset_fn_type_align => |node_off| { @@ -2539,7 +2539,7 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const token_tags = tree.tokens.items(.tag); const main_token = tree.nodes.items(.main_token)[src_loc.parent_decl_node]; - const tok_index = @bitCast(Ast.TokenIndex, token_off + @bitCast(i32, main_token)); + const tok_index = @as(Ast.TokenIndex, @bitCast(token_off + @as(i32, @bitCast(main_token)))); var first_tok = tok_index; while (true) switch (token_tags[first_tok - 1]) { @@ -2568,7 +2568,7 @@ pub const SrcLoc = struct { const full = tree.fullFnProto(&buf, parent_node).?; const tok_index = full.lib_name.?; const start = tree.tokens.items(.start)[tok_index]; - const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len)); return Span{ .start = start, .end = end, .main = start }; }, @@ -2761,7 +2761,7 @@ pub const SrcLoc = struct { end_tok = main; } const start_off = token_starts[start_tok]; - const end_off = token_starts[end_tok] + @intCast(u32, tree.tokenSlice(end_tok).len); + const end_off = token_starts[end_tok] + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); return Span{ .start = start_off, .end = end_off, .main = token_starts[main] }; } }; @@ -3577,7 +3577,7 @@ pub fn 
astGenFile(mod: *Module, file: *File) !void { if (stat.size > std.math.maxInt(u32)) return error.FileTooBig; - const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); defer if (!file.source_loaded) gpa.free(source); const amt = try source_file.readAll(source); if (amt != stat.size) @@ -3609,21 +3609,21 @@ pub fn astGenFile(mod: *Module, file: *File) !void { if (file.zir.instructions.len == 0) @as([*]const u8, undefined) else - @ptrCast([*]const u8, safety_buffer.ptr) + @as([*]const u8, @ptrCast(safety_buffer.ptr)) else - @ptrCast([*]const u8, file.zir.instructions.items(.data).ptr); + @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); if (data_has_safety_tag) { // The `Data` union has a safety tag but in the file format we store it without. for (file.zir.instructions.items(.data), 0..) |*data, i| { - const as_struct = @ptrCast(*const HackDataLayout, data); + const as_struct = @as(*const HackDataLayout, @ptrCast(data)); safety_buffer[i] = as_struct.data; } } const header: Zir.Header = .{ - .instructions_len = @intCast(u32, file.zir.instructions.len), - .string_bytes_len = @intCast(u32, file.zir.string_bytes.len), - .extra_len = @intCast(u32, file.zir.extra.len), + .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), + .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), + .extra_len = @as(u32, @intCast(file.zir.extra.len)), .stat_size = stat.size, .stat_inode = stat.inode, @@ -3631,11 +3631,11 @@ pub fn astGenFile(mod: *Module, file: *File) !void { }; var iovecs = [_]std.os.iovec_const{ .{ - .iov_base = @ptrCast([*]const u8, &header), + .iov_base = @as([*]const u8, @ptrCast(&header)), .iov_len = @sizeOf(Zir.Header), }, .{ - .iov_base = @ptrCast([*]const u8, file.zir.instructions.items(.tag).ptr), + .iov_base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), .iov_len = file.zir.instructions.len, }, .{ @@ 
-3647,7 +3647,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void { .iov_len = file.zir.string_bytes.len, }, .{ - .iov_base = @ptrCast([*]const u8, file.zir.extra.ptr), + .iov_base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), .iov_len = file.zir.extra.len * 4, }, }; @@ -3722,13 +3722,13 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) defer if (data_has_safety_tag) gpa.free(safety_buffer); const data_ptr = if (data_has_safety_tag) - @ptrCast([*]u8, safety_buffer.ptr) + @as([*]u8, @ptrCast(safety_buffer.ptr)) else - @ptrCast([*]u8, zir.instructions.items(.data).ptr); + @as([*]u8, @ptrCast(zir.instructions.items(.data).ptr)); var iovecs = [_]std.os.iovec{ .{ - .iov_base = @ptrCast([*]u8, zir.instructions.items(.tag).ptr), + .iov_base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)), .iov_len = header.instructions_len, }, .{ @@ -3740,7 +3740,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) .iov_len = header.string_bytes_len, }, .{ - .iov_base = @ptrCast([*]u8, zir.extra.ptr), + .iov_base = @as([*]u8, @ptrCast(zir.extra.ptr)), .iov_len = header.extra_len * 4, }, }; @@ -3753,7 +3753,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) const tags = zir.instructions.items(.tag); for (zir.instructions.items(.data), 0..) 
|*data, i| { const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])]; - const as_struct = @ptrCast(*HackDataLayout, data); + const as_struct = @as(*HackDataLayout, @ptrCast(data)); as_struct.* = .{ .safety_tag = @intFromEnum(union_tag), .data = safety_buffer[i], @@ -4394,7 +4394,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); struct_obj.is_tuple = small.is_tuple; var sema_arena = std.heap.ArenaAllocator.init(gpa); @@ -5051,13 +5051,13 @@ pub fn scanNamespace( cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const flags = @truncate(u4, cur_bit_bag); + const flags = @as(u4, @truncate(cur_bit_bag)); cur_bit_bag >>= 4; const decl_sub_index = extra_index; extra_index += 8; // src_hash(4) + line(1) + name(1) + value(1) + doc_comment(1) - extra_index += @truncate(u1, flags >> 2); // Align - extra_index += @as(u2, @truncate(u1, flags >> 3)) * 2; // Link section or address space, consists of 2 Refs + extra_index += @as(u1, @truncate(flags >> 2)); // Align + extra_index += @as(u2, @as(u1, @truncate(flags >> 3))) * 2; // Link section or address space, consists of 2 Refs try scanDecl(&scan_decl_iter, decl_sub_index, flags); } @@ -5195,7 +5195,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err new_decl.is_exported = is_exported; new_decl.has_align = has_align; new_decl.has_linksection_or_addrspace = has_linksection_or_addrspace; - new_decl.zir_decl_index = @intCast(u32, decl_sub_index); + new_decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive. 
return; } @@ -5229,7 +5229,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.kind = kind; decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; - decl.zir_decl_index = @intCast(u32, decl_sub_index); + decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { @@ -5481,7 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. - const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); + const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len)); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5524,13 +5524,13 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE continue; } const air_ty = try sema.addType(param_ty); - const arg_index = @intCast(u32, sema.air_instructions.len); + const arg_index = @as(u32, @intCast(sema.air_instructions.len)); inner_block.instructions.appendAssumeCapacity(arg_index); sema.air_instructions.appendAssumeCapacity(.{ .tag = .arg, .data = .{ .arg = .{ .ty = air_ty, - .src_index = @intCast(u32, total_param_index), + .src_index = @as(u32, @intCast(total_param_index)), } }, }); sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index)); @@ -5593,7 +5593,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len 
+ inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, inner_block.instructions.items.len), + .body_len = @as(u32, @intCast(inner_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; @@ -5671,7 +5671,7 @@ pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index } const ptr = try mod.allocated_namespaces.addOne(mod.gpa); ptr.* = initialization; - return @enumFromInt(Namespace.Index, mod.allocated_namespaces.len - 1); + return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1)); } pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { @@ -5729,7 +5729,7 @@ pub fn allocateNewDecl( } break :d .{ .new_decl = decl, - .decl_index = @enumFromInt(Decl.Index, mod.allocated_decls.len - 1), + .decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)), }; }; @@ -5767,7 +5767,7 @@ pub fn getErrorValue( name: InternPool.NullTerminatedString, ) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - return @intCast(ErrorInt, gop.index); + return @as(ErrorInt, @intCast(gop.index)); } pub fn getErrorValueFromSlice( @@ -6139,7 +6139,7 @@ pub fn paramSrc( if (i == param_i) { if (param.anytype_ellipsis3) |some| { const main_token = tree.nodes.items(.main_token)[decl.src_node]; - return .{ .token_offset_param = @bitCast(i32, some) - @bitCast(i32, main_token) }; + return .{ .token_offset_param = @as(i32, @bitCast(some)) - @as(i32, @bitCast(main_token)) }; } return .{ .node_offset_param = decl.nodeIndexToRelative(param.type_expr) }; } @@ -6892,11 +6892,11 @@ pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocato /// losing data if the representation wasn't correct. 
pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { - 16 => .{ .f16 = @floatCast(f16, x) }, - 32 => .{ .f32 = @floatCast(f32, x) }, - 64 => .{ .f64 = @floatCast(f64, x) }, - 80 => .{ .f80 = @floatCast(f80, x) }, - 128 => .{ .f128 = @floatCast(f128, x) }, + 16 => .{ .f16 = @as(f16, @floatCast(x)) }, + 32 => .{ .f32 = @as(f32, @floatCast(x)) }, + 64 => .{ .f64 = @as(f64, @floatCast(x)) }, + 80 => .{ .f80 = @as(f80, @floatCast(x)) }, + 128 => .{ .f128 = @as(f128, @floatCast(x)) }, else => unreachable, }; const i = try intern(mod, .{ .float = .{ @@ -6956,18 +6956,18 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(sign); // Protect against overflow in the following negation. if (x == std.math.minInt(i64)) return 64; - return Type.smallestUnsignedBits(@intCast(u64, -(x + 1))) + 1; + return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1; }, .u64 => |x| { return Type.smallestUnsignedBits(x) + @intFromBool(sign); }, .big_int => |big| { - if (big.positive) return @intCast(u16, big.bitCountAbs() + @intFromBool(sign)); + if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign))); // Zero is still a possibility, in which case unsigned is fine if (big.eqZero()) return 0; - return @intCast(u16, big.bitCountTwosComp()); + return @as(u16, @intCast(big.bitCountTwosComp())); }, .lazy_align => |lazy_ty| { return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign); diff --git a/src/Package.zig b/src/Package.zig index dd8f3c8a7e19..2e1dd4e14f1e 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -390,10 +390,10 @@ const Report = struct { .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(file_path), .span_start = token_starts[msg.tok], - .span_end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len), + .span_end = @as(u32, 
@intCast(token_starts[msg.tok] + ast.tokenSlice(msg.tok).len)), .span_main = token_starts[msg.tok] + msg.off, - .line = @intCast(u32, start_loc.line), - .column = @intCast(u32, start_loc.column), + .line = @as(u32, @intCast(start_loc.line)), + .column = @as(u32, @intCast(start_loc.column)), .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]), }), .notes_len = notes_len, diff --git a/src/Sema.zig b/src/Sema.zig index e45cccd43bad..95ebaca9fb36 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -212,7 +212,7 @@ pub const InstMap = struct { while (true) { const extra_capacity = better_capacity / 2 + 16; better_capacity += extra_capacity; - better_start -|= @intCast(Zir.Inst.Index, extra_capacity / 2); + better_start -|= @as(Zir.Inst.Index, @intCast(extra_capacity / 2)); if (better_start <= start and end < better_capacity + better_start) break; } @@ -225,7 +225,7 @@ pub const InstMap = struct { allocator.free(map.items); map.items = new_items; - map.start = @intCast(Zir.Inst.Index, better_start); + map.start = @as(Zir.Inst.Index, @intCast(better_start)); } }; @@ -619,7 +619,7 @@ pub const Block = struct { const sema = block.sema; const ty_ref = try sema.addType(aggregate_ty); try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len); - const extra_index = @intCast(u32, sema.air_extra.items.len); + const extra_index = @as(u32, @intCast(sema.air_extra.items.len)); sema.appendRefsAssumeCapacity(elements); return block.addInst(.{ @@ -660,7 +660,7 @@ pub const Block = struct { try sema.air_instructions.ensureUnusedCapacity(gpa, 1); try block.instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); return result_index; @@ -678,7 +678,7 @@ pub const Block = struct { try 
sema.air_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.appendAssumeCapacity(inst); try block.instructions.insert(gpa, index, result_index); @@ -1763,7 +1763,7 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { const i = @intFromEnum(zir_ref); // First section of indexes correspond to a set number of constant values. // We intentionally map the same indexes to the same values between ZIR and AIR. - if (i < InternPool.static_len) return @enumFromInt(Air.Inst.Ref, i); + if (i < InternPool.static_len) return @as(Air.Inst.Ref, @enumFromInt(i)); // The last section of indexes refers to the map of ZIR => AIR. const inst = sema.inst_map.get(i - InternPool.static_len).?; if (inst == .generic_poison) return error.GenericPoison; @@ -2041,7 +2041,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( // First section of indexes correspond to a set number of constant values. const int = @intFromEnum(inst); if (int < InternPool.static_len) { - return @enumFromInt(InternPool.Index, int).toValue(); + return @as(InternPool.Index, @enumFromInt(int)).toValue(); } const i = int - InternPool.static_len; @@ -2430,7 +2430,7 @@ fn analyzeAsAlign( air_ref: Air.Inst.Ref, ) !Alignment { const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, "alignment must be comptime-known"); - const alignment = @intCast(u32, alignment_big); // We coerce to u29 in the prev line. + const alignment = @as(u32, @intCast(alignment_big)); // We coerce to u29 in the prev line. 
try sema.validateAlign(block, src, alignment); return Alignment.fromNonzeroByteUnits(alignment); } @@ -2737,7 +2737,7 @@ pub fn analyzeStructDecl( const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); struct_obj.known_non_opv = small.known_non_opv; if (small.known_comptime_only) { @@ -2774,9 +2774,9 @@ fn zirStructDecl( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extended.operand])); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -2937,18 +2937,18 @@ fn zirEnumDecl( const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -3108,7 +3108,7 @@ fn zirEnumDecl( 
cur_bit_bag = sema.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_tag_value = @truncate(u1, cur_bit_bag) != 0; + const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]); @@ -3131,7 +3131,7 @@ fn zirEnumDecl( } const tag_overflow = if (has_tag_value) overflow: { - const tag_val_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const tag_val_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { @@ -3213,11 +3213,11 @@ fn zirUnionDecl( const mod = sema.mod; const gpa = sema.gpa; - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -3298,11 +3298,11 @@ fn zirOpaqueDecl( defer tracy.end(); const mod = sema.mod; - const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset = @bitCast(i32, sema.code.extra[extra_index]); + const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -3369,7 +3369,7 @@ fn zirErrorSetDecl( var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); - var extra_index = 
@intCast(u32, extra.end); + var extra_index = @as(u32, @intCast(extra.end)); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; @@ -3569,18 +3569,18 @@ fn zirAllocExtended( const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; - const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; const var_ty: Type = if (small.has_type) blk: { - const type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk try sema.resolveType(block, ty_src, type_ref); } else undefined; const alignment = if (small.has_align) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const alignment = try sema.resolveAlign(block, align_src, align_ref); break :blk alignment; @@ -3598,7 +3598,7 @@ fn zirAllocExtended( .is_const = small.is_const, } }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } } @@ -3730,7 +3730,7 @@ fn zirAllocInferredComptime( .is_const = is_const, } }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3795,7 +3795,7 @@ fn zirAllocInferred( .is_const = is_const, } }, }); 
- return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } const result_index = try block.addInstAsIndex(.{ @@ -4037,7 +4037,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com .data = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, replacement_block.instructions.items.len), + .body_len = @as(u32, @intCast(replacement_block.instructions.items.len)), }), } }, }); @@ -4121,7 +4121,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // First pass to look for comptime values. for (args, 0..) |zir_arg, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); runtime_arg_lens[i] = .none; if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); @@ -4192,7 +4192,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const msg = try sema.errMsg(block, src, "unbounded for loop", .{}); errdefer msg.destroy(gpa); for (args, 0..) 
|zir_arg, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); const object_ty = sema.typeOf(object); @@ -4435,7 +4435,7 @@ fn validateUnionInit( } const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { @@ -4547,9 +4547,9 @@ fn validateStructInit( const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) - try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) + try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true); const init = try sema.addConstant(default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -4729,9 +4729,9 @@ fn validateStructInit( const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) - try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) + try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true); const init = try sema.addConstant(field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, 
field_src, .store); } @@ -5165,7 +5165,7 @@ fn storeToInferredAllocComptime( fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const quota = @intCast(u32, try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known")); + const quota = @as(u32, @intCast(try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known"))); sema.branch_quota = @max(sema.branch_quota, quota); } @@ -5388,7 +5388,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError // Reserve space for a Loop instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const loop_inst = block_inst + 1; try sema.air_instructions.ensureUnusedCapacity(gpa, 2); sema.air_instructions.appendAssumeCapacity(.{ @@ -5436,7 +5436,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len); sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, loop_block_len) }, + Air.Block{ .body_len = @as(u32, @intCast(loop_block_len)) }, ); sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items); } @@ -5586,7 +5586,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_compt // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated or is an unlabeled block. 
- const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -5733,7 +5733,7 @@ fn analyzeBlockBody( sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(u32, child_block.instructions.items.len), + .body_len = @as(u32, @intCast(child_block.instructions.items.len)), }), } }; sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); @@ -5761,11 +5761,11 @@ fn analyzeBlockBody( // Convert the br instruction to a block instruction that has the coercion // and then a new br inside that returns the coerced instruction. - const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1); + const sub_block_len = @as(u32, @intCast(coerce_block.instructions.items.len + 1)); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + sub_block_len); try sema.air_instructions.ensureUnusedCapacity(gpa, 1); - const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const sub_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.items(.tag)[br] = .block; sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{ @@ -6114,7 +6114,7 @@ fn addDbgVar( try sema.queueFullTypeResolution(operand_ty); // Add the name to the AIR. 
- const name_extra_index = @intCast(u32, sema.air_extra.items.len); + const name_extra_index = @as(u32, @intCast(sema.air_extra.items.len)); const elements_used = name.len / 4 + 1; try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used); const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @@ -6314,7 +6314,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = try sema.addType(stack_trace_ty), - .payload = @intCast(u32, field_index), + .payload = @as(u32, @intCast(field_index)), } }, }); } @@ -6386,12 +6386,12 @@ fn popErrorReturnTrace( then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block - const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const cond_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }), } } }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); @@ -6422,7 +6422,7 @@ fn zirCall( const extra = sema.code.extraData(ExtraType, inst_data.payload_index); const args_len = extra.data.flags.args_len; - const modifier = @enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier); + const modifier = @as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier)); const ensure_result_used = extra.data.flags.ensure_result_used; const pop_error_return_trace = 
extra.data.flags.pop_error_return_trace; @@ -6460,7 +6460,7 @@ fn zirCall( const args_body = sema.code.extra[extra.end..]; var input_is_error = false; - const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); + const block_index = @as(Air.Inst.Index, @intCast(block.instructions.items.len)); const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len; const parent_comptime = block.is_comptime; @@ -6477,7 +6477,7 @@ fn zirCall( // Generate args to comptime params in comptime block. defer block.is_comptime = parent_comptime; - if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@as(u5, @intCast(arg_index)))) { block.is_comptime = true; // TODO set comptime_reason } @@ -6533,7 +6533,7 @@ fn zirCall( .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = try sema.addType(stack_trace_ty), - .payload = @intCast(u32, field_index), + .payload = @as(u32, @intCast(field_index)), } }, }); @@ -6809,7 +6809,7 @@ fn analyzeCall( // set to in the `Block`. // This block instruction will be used to capture the return value from the // inlined function. 
- const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -7077,7 +7077,7 @@ fn analyzeCall( if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, - .param_i = @intCast(u32, i), + .param_i = @as(u32, @intCast(i)), } }; const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( @@ -7136,7 +7136,7 @@ fn analyzeCall( .data = .{ .pl_op = .{ .operand = func, .payload = sema.addExtraAssumeCapacity(Air.Call{ - .args_len = @intCast(u32, args.len), + .args_len = @as(u32, @intCast(args.len)), }), } }, }); @@ -7245,7 +7245,7 @@ fn analyzeInlineCallArg( } const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, - .param_i = @intCast(u32, arg_i.*), + .param_i = @as(u32, @intCast(arg_i.*)), } }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -7419,14 +7419,14 @@ fn instantiateGenericCall( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7588,7 +7588,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len); + const runtime_args_len = @as(u32, @intCast(mod.typeToFunc(func_ty).?.param_types.len)); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { var runtime_i: u32 = 0; @@ -7738,14 +7738,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7779,7 +7779,7 @@ fn resolveGenericInstantiationType( .tag = .arg, .data = .{ .arg = .{ .ty = try child_sema.addType(arg_ty), - .src_index = @intCast(u32, arg_i), + .src_index = @as(u32, @intCast(arg_i)), } }, }); child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); @@ -7799,7 +7799,7 @@ fn resolveGenericInstantiationType( const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); - const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const monomorphed_args_index = @as(u32, @intCast(mod.monomorphed_func_keys.items.len)); const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len); var monomorphed_arg_i: u32 = 0; try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod }); @@ -7811,14 +7811,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + 
is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i))); }, .param_anytype_comptime => { is_anytype = true; @@ -7984,7 +7984,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); + const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"))); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try mod.vectorType(.{ @@ -8140,7 +8140,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD switch (names.len) { 0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)), 1 => { - const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?)); return sema.addIntUnsigned(Type.err_int, int); }, else => {}, @@ -8727,7 +8727,7 @@ fn zirFunc( const ret_ty: Type = switch (extra.data.ret_body_len) { 0 => Type.void, 1 => blk: { - const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| { break :blk ret_ty; @@ -8964,7 +8964,7 
@@ fn funcCommon( for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; - break :blk @truncate(u1, noalias_bits >> index) != 0; + break :blk @as(u1, @truncate(noalias_bits >> index)) != 0; }; dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( @@ -9199,8 +9199,8 @@ fn funcCommon( .hash = hash, .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, - .lbrace_column = @truncate(u16, src_locs.columns), - .rbrace_column = @truncate(u16, src_locs.columns >> 16), + .lbrace_column = @as(u16, @truncate(src_locs.columns)), + .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; @@ -9225,7 +9225,7 @@ fn analyzeParameter( const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); if (param.is_comptime or requires_comptime) { - comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + comptime_bits.* |= @as(u32, 1) << @as(u5, @intCast(i)); // TODO: handle cast error } const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; @@ -9411,7 +9411,7 @@ fn zirParam( sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. 
- const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(sema.gpa, .{ .tag = .alloc, .data = .{ .ty = param_ty }, @@ -10287,7 +10287,7 @@ const SwitchProngAnalysis = struct { if (inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, inline_case_capture, "") catch unreachable; if (operand_ty.zigTypeTag(mod) == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, mod).?); + const field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?)); const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (capture_byref) { @@ -10346,13 +10346,13 @@ const SwitchProngAnalysis = struct { const union_obj = mod.typeToUnion(operand_ty).?; const first_item_val = sema.resolveConstValue(block, .unneeded, case_vals[0], "") catch unreachable; - const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?); + const first_field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(first_item_val, mod).?)); const first_field = union_obj.fields.values()[first_field_index]; const field_tys = try sema.arena.alloc(Type, case_vals.len); for (case_vals, field_tys) |item, *field_ty| { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_idx = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); + const field_idx = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, sema.mod).?)); field_ty.* = union_obj.fields.values()[field_idx].ty; } @@ -10378,7 +10378,7 @@ const SwitchProngAnalysis = struct { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); for (case_srcs, 0..) 
|*case_src, i| { - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } }; case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); } const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); @@ -10426,7 +10426,7 @@ const SwitchProngAnalysis = struct { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } }; const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none); const msg = msg: { const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); @@ -10529,12 +10529,12 @@ const SwitchProngAnalysis = struct { var coerce_block = block.makeSubBlock(); defer coerce_block.instructions.deinit(sema.gpa); - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, idx), field_tys[idx]); + const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(idx)), field_tys[idx]); const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const multi_idx = raw_capture_src.multi_capture; const src_decl_ptr = sema.mod.declPtr(block.src_decl); - const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, idx) } }; + const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(idx)) } }; const case_src = raw_case_src.resolve(mod, src_decl_ptr, 
switch_node_offset, .none); _ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); unreachable; @@ -10545,7 +10545,7 @@ const SwitchProngAnalysis = struct { try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, coerce_block.instructions.items.len)); // body_len + cases_extra.appendAssumeCapacity(@as(u32, @intCast(coerce_block.instructions.items.len))); // body_len cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item cases_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); // body } @@ -10556,7 +10556,7 @@ const SwitchProngAnalysis = struct { defer coerce_block.instructions.deinit(sema.gpa); const first_imc = in_mem_coercible.findFirstSet().?; - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, first_imc), field_tys[first_imc]); + const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(first_imc)), field_tys[first_imc]); const coerced = try coerce_block.addBitCast(capture_ty, uncoerced); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -10569,14 +10569,14 @@ const SwitchProngAnalysis = struct { @typeInfo(Air.Block).Struct.fields.len + 1); - const switch_br_inst = @intCast(u32, sema.air_instructions.len); + const switch_br_inst = @as(u32, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(sema.gpa, .{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = spa.cond, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, prong_count), - .else_body_len = @intCast(u32, else_body_len), + .cases_len = @as(u32, @intCast(prong_count)), + .else_body_len = @as(u32, @intCast(else_body_len)), }), } }, }); @@ -10763,7 +10763,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .has_tag_capture = false, }, .under, .@"else" => blk: { - const info = 
@bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[header_extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[header_extra_index])); const extra_body_start = header_extra_index + 1; break :blk .{ .body = sema.code.extra[extra_body_start..][0..info.body_len], @@ -10833,9 +10833,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum( @@ -10856,7 +10856,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -10870,7 +10870,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -10932,9 +10932,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const 
item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemError( @@ -10954,7 +10954,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -10967,7 +10967,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -11073,9 +11073,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt( @@ -11095,7 +11095,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = 
@bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; @@ -11108,16 +11108,16 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len); var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { - const item_first = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const item_last = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const vals = try sema.validateSwitchRange( @@ -11168,9 +11168,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool( @@ -11190,7 +11190,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, 
sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -11203,7 +11203,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r &false_count, item_ref, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -11250,9 +11250,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; extra_index += info.body_len; @@ -11273,7 +11273,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; @@ -11286,7 +11286,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r item_ref, operand_ty, src_node_offset, - .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, + .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, )); } @@ -11324,7 +11324,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index, operand_is_r .tag_capture_inst = tag_capture_inst, }; - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, @@ -11368,7 +11368,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const body = sema.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -11382,7 +11382,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .scalar_capture = @intCast(u32, scalar_i) }, + .{ .scalar_capture = @as(u32, @intCast(scalar_i)) }, &.{item}, if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11399,7 +11399,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + items_len; const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..info.body_len]; @@ -11416,7 +11416,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .multi_capture = @intCast(u32, multi_i) }, + .{ .multi_capture = @as(u32, @intCast(multi_i)) }, items, if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11443,7 +11443,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r 
.normal, body, info.capture, - .{ .multi_capture = @intCast(u32, multi_i) }, + .{ .multi_capture = @as(u32, @intCast(multi_i)) }, undefined, // case_vals may be undefined for ranges if (info.is_inline) operand else .none, info.has_tag_capture, @@ -11528,7 +11528,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1; const body = sema.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -11556,7 +11556,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .normal, body, info.capture, - .{ .scalar_capture = @intCast(u32, scalar_i) }, + .{ .scalar_capture = @as(u32, @intCast(scalar_i)) }, &.{item}, if (info.is_inline) item else .none, info.has_tag_capture, @@ -11569,7 +11569,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11589,7 +11589,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index])); extra_index += 1 + items_len; const items = 
case_vals.items[case_val_idx..][0..items_len]; @@ -11654,7 +11654,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); @@ -11676,7 +11676,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; + const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }; const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; @@ -11702,7 +11702,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11750,8 +11750,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); - 
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(items.len))); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); for (items) |item| { cases_extra.appendAssumeCapacity(@intFromEnum(item)); @@ -11846,8 +11846,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, prev_then_body.len), - .else_body_len = @intCast(u32, cond_body.len), + .then_body_len = @as(u32, @intCast(prev_then_body.len)), + .else_body_len = @as(u32, @intCast(cond_body.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(cond_body); @@ -11872,7 +11872,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r if (f != null) continue; cases_len += 1; - const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i)); + const item_val = try mod.enumValueFieldIndex(operand_ty, @as(u32, @intCast(i))); const item_ref = try sema.addConstant(item_val); case_block.instructions.shrinkRetainingCapacity(0); @@ -11903,7 +11903,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11944,7 +11944,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); 
cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -11975,7 +11975,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12003,7 +12003,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12029,7 +12029,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false)); 
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } @@ -12098,8 +12098,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, prev_then_body.len), - .else_body_len = @intCast(u32, case_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(prev_then_body.len)), + .else_body_len = @as(u32, @intCast(case_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); @@ -12113,8 +12113,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, cases_len), - .else_body_len = @intCast(u32, final_else_body.len), + .cases_len = @as(u32, @intCast(cases_len)), + .else_body_len = @as(u32, @intCast(final_else_body.len)), }), } } }); sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); @@ -13527,7 +13527,7 @@ fn analyzeTupleMul( var i: u32 = 0; while (i < tuple_len) : (i += 1) { const operand_src = lhs_src; // TODO better source location - element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty); + element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @as(u32, @intCast(i)), operand_ty); } i = 1; while (i < factor) : (i += 1) { @@ -15593,10 +15593,10 @@ fn analyzePtrArithmetic( // The resulting pointer is aligned to the lcd between the offset (an // arbitrary number) and the alignment factor (always a power of two, // non zero). 
- const new_align = @enumFromInt(Alignment, @min( + const new_align = @as(Alignment, @enumFromInt(@min( @ctz(addend), @intFromEnum(ptr_info.flags.alignment), - )); + ))); assert(new_align != .none); break :t try mod.ptrType(.{ @@ -15675,14 +15675,14 @@ fn zirAsm( const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src = LazySrcLoc.nodeOffset(extra.data.src_node); const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node }; - const outputs_len = @truncate(u5, extended.small); - const inputs_len = @truncate(u5, extended.small >> 5); - const clobbers_len = @truncate(u5, extended.small >> 10); - const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs_len = @as(u5, @truncate(extended.small)); + const inputs_len = @as(u5, @truncate(extended.small >> 5)); + const clobbers_len = @as(u5, @truncate(extended.small >> 10)); + const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0; const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { - const tmpl = @enumFromInt(Zir.Inst.Ref, extra.data.asm_source); + const tmpl = @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source)); const s: []const u8 = try sema.resolveConstString(block, src, tmpl, "assembly code must be comptime-known"); break :blk s; } else sema.code.nullTerminatedString(extra.data.asm_source); @@ -15721,7 +15721,7 @@ fn zirAsm( const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; - const is_type = @truncate(u1, output_type_bits) != 0; + const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; if (is_type) { @@ -15783,10 +15783,10 @@ fn zirAsm( .data = .{ .ty_pl = .{ .ty = expr_ty, .payload = sema.addExtraAssumeCapacity(Air.Asm{ - .source_len = @intCast(u32, asm_source.len), + .source_len = @as(u32, @intCast(asm_source.len)), .outputs_len = outputs_len, - .inputs_len = @intCast(u32, args.len), - .flags = (@as(u32, 
@intFromBool(is_volatile)) << 31) | @intCast(u32, clobbers.len), + .inputs_len = @as(u32, @intCast(args.len)), + .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)), }), } }, }); @@ -16192,7 +16192,7 @@ fn zirThis( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const this_decl_index = mod.namespaceDeclIndex(block.namespace); - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); return sema.analyzeDeclVal(block, src, this_decl_index); } @@ -16329,7 +16329,7 @@ fn zirFrameAddress( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); try sema.requireRuntimeBlock(block, src, null); return try block.addNoOp(.frame_addr); } @@ -16482,7 +16482,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; - break :blk @truncate(u1, info.noalias_bits >> index) != 0; + break :blk @as(u1, @truncate(info.noalias_bits >> index)) != 0; }; const param_fields = .{ @@ -16925,7 +16925,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .u64 = @intCast(u64, i) }, + .storage = .{ .u64 = @as(u64, @intCast(i)) }, } }); // TODO: write something like getCoercedInts to avoid needing to dupe const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i])); @@ -17739,7 +17739,7 @@ fn zirBoolBr( return sema.resolveBody(parent_block, body, inst); } - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = .{ 
.ty_pl = .{ @@ -17801,8 +17801,8 @@ fn finishCondBr( @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); @@ -17813,7 +17813,7 @@ fn finishCondBr( } } }); sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)) }, ); sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); @@ -17976,8 +17976,8 @@ fn zirCondbr( .data = .{ .pl_op = .{ .operand = cond, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, true_instructions.len), - .else_body_len = @intCast(u32, sub_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(true_instructions.len)), + .else_body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18024,7 +18024,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
.data = .{ .pl_op = .{ .operand = err_union, .payload = sema.addExtraAssumeCapacity(Air.Try{ - .body_len = @intCast(u32, sub_block.instructions.items.len), + .body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18084,7 +18084,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr .ty = res_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.TryPtr{ .ptr = operand, - .body_len = @intCast(u32, sub_block.instructions.items.len), + .body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); @@ -18100,7 +18100,7 @@ fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !voi const labeled_block = if (!gop.found_existing) blk: { try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1); - const new_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const new_block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); gop.value_ptr.* = Air.indexToRef(new_block_inst); try sema.air_instructions.append(sema.gpa, .{ .tag = .block, @@ -18296,8 +18296,8 @@ fn retWithErrTracing( @typeInfo(Air.Block).Struct.fields.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(u32, then_block.instructions.items.len), - .else_body_len = @intCast(u32, else_block.instructions.items.len), + .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), + .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); @@ -18486,7 +18486,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; 
const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); @@ -18494,7 +18494,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } else .none; const abi_align: Alignment = if (inst_data.flags.has_align) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); @@ -18507,29 +18507,29 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); + const abi_align = @as(u32, @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?)); try sema.validateAlign(block, align_src, abi_align); break :blk Alignment.fromByteUnits(abi_align); } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, "pointer bit-offset must be comptime-known"); - break :blk @intCast(u16, bit_offset); + break :blk @as(u16, @intCast(bit_offset)); } 
else 0; const host_size: u16 = if (inst_data.flags.has_bit_range) blk: { - const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]); + const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, "pointer host size must be comptime-known"); - break :blk @intCast(u16, host_size); + break :blk @as(u16, @intCast(host_size)); } else 0; if (host_size != 0 and bit_offset >= host_size * 8) { @@ -18669,7 +18669,7 @@ fn unionInit( if (try sema.resolveMaybeUndefVal(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); return sema.addConstant((try mod.intern(.{ .un = .{ .ty = union_ty.toIntern(), @@ -18771,7 +18771,7 @@ fn zirStructInit( const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?)); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); @@ -18915,7 +18915,7 @@ fn finishStructInit( }); const alloc = try block.addTy(.alloc, alloc_ty); for (field_inits, 0..) 
|field_init, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const field_src = dest_src; const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true); try sema.storePtr(block, dest_src, field_ptr, field_init); @@ -18958,7 +18958,7 @@ fn zirStructInitAnon( var runtime_index: ?usize = null; var extra_index = extra.end; for (types, 0..) |*field_ty, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; @@ -19037,7 +19037,7 @@ fn zirStructInitAnon( const alloc = try block.addTy(.alloc, alloc_ty); var extra_index = extra.end; for (types, 0..) |field_ty, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; @@ -19109,7 +19109,7 @@ fn zirArrayInit( const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| { const comptime_known = try sema.isComptimeKnown(arg); - if (!comptime_known) break @intCast(u32, i); + if (!comptime_known) break @as(u32, @intCast(i)); } else null; const runtime_index = opt_runtime_index orelse { @@ -19244,7 +19244,7 @@ fn zirArrayInitAnon( }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) 
|operand, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); const field_ptr_ty = try mod.ptrType(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, @@ -19395,7 +19395,7 @@ fn zirFrame( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); return sema.failWithUseOfAsync(block, src); } @@ -19588,7 +19588,7 @@ fn zirReify( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const name_strategy = @enumFromInt(Zir.Inst.NameStrategy, extended.small); + const name_strategy = @as(Zir.Inst.NameStrategy, @enumFromInt(extended.small)); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const type_info_ty = try sema.getBuiltinType("Type"); @@ -19600,7 +19600,7 @@ fn zirReify( const target = mod.getTarget(); if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; - switch (@enumFromInt(std.builtin.TypeId, tag_index)) { + switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, .Bool => return Air.Inst.Ref.bool_type, @@ -19623,7 +19623,7 @@ fn zirReify( ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod))); const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, @@ -19636,7 +19636,7 @@ fn zirReify( try ip.getOrPutString(gpa, "child"), ).?); - const len = @intCast(u32, len_val.toUnsignedInt(mod)); + const len = @as(u32, @intCast(len_val.toUnsignedInt(mod))); const child_ty = 
child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -19653,7 +19653,7 @@ fn zirReify( try ip.getOrPutString(gpa, "bits"), ).?); - const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod))); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -19925,7 +19925,7 @@ fn zirReify( } // Define our empty enum decl - const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); + const fields_len = @as(u32, @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)))); const incomplete_enum = try ip.getIncompleteEnum(gpa, .{ .decl = new_decl_index, .namespace = .none, @@ -20288,7 +20288,7 @@ fn zirReify( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); + const alignment = @as(u29, @intCast(alignment_val.toUnsignedInt(mod))); if (alignment == target_util.defaultFunctionAlignment(target)) { break :alignment .none; } else { @@ -20565,7 +20565,7 @@ fn reifyStruct( try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = backing_int_ty; } else { - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum))); } struct_obj.status = .have_layout; @@ -20636,7 +20636,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C } fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); const va_list_ty = try sema.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); @@ -20903,7 +20903,7 @@ fn zirErrSetCast(sema: 
*Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -21310,7 +21310,7 @@ fn ptrCastFull( fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const mod = sema.mod; - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -22271,7 +22271,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const scalar_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known")); + const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known"))); const scalar = try sema.resolveInst(extra.rhs); const scalar_ty = sema.typeOf(scalar); try sema.checkVectorElemType(block, scalar_src, scalar_ty); @@ -22376,12 +22376,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Air else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; mask_ty = try mod.vectorType(.{ - .len = @intCast(u32, mask_len), + .len = @as(u32, @intCast(mask_len)), .child = .i32_type, }); mask = try sema.coerce(block, mask_ty, mask, mask_src); const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known"); - return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(u32, mask_len)); + return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @as(u32, @intCast(mask_len))); } fn analyzeShuffle( @@ -22425,8 +22425,8 @@ fn analyzeShuffle( if (maybe_a_len == null and maybe_b_len == null) { return sema.addConstUndef(res_ty); } - const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?); - const b_len = @intCast(u32, maybe_b_len orelse a_len); + const a_len = @as(u32, @intCast(maybe_a_len orelse maybe_b_len.?)); + const b_len = @as(u32, @intCast(maybe_b_len orelse a_len)); const a_ty = try mod.vectorType(.{ .len = a_len, @@ -22445,17 +22445,17 @@ fn analyzeShuffle( .{ b_len, b_src, b_ty }, }; - for (0..@intCast(usize, mask_len)) |i| { + for (0..@as(usize, @intCast(mask_len))) |i| { const elem = try mask.elemValue(sema.mod, i); if (elem.isUndef(mod)) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { - unsigned = @intCast(u32, int); + unsigned = @as(u32, @intCast(int)); chosen = 0; } else { - unsigned = @intCast(u32, ~int); + unsigned = @as(u32, @intCast(~int)); chosen = 1; } if (unsigned >= operand_info[chosen][0]) { @@ -22488,7 +22488,7 @@ fn analyzeShuffle( continue; } const int = mask_elem_val.toSignedInt(mod); - const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int)); values[i] = try (try (if (int >= 0) a_val 
else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod); } return sema.addConstant((try mod.intern(.{ .aggregate = .{ @@ -22509,23 +22509,23 @@ fn analyzeShuffle( const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); - for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { + for (@as(usize, @intCast(0))..@as(usize, @intCast(min_len))) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } - for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| { + for (@as(usize, @intCast(min_len))..@as(usize, @intCast(max_len))) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } const expand_mask = try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(), + .ty = (try mod.vectorType(.{ .len = @as(u32, @intCast(max_len)), .child = .comptime_int_type })).toIntern(), .storage = .{ .elems = expand_mask_values }, } }); if (a_len < b_len) { const undef = try sema.addConstUndef(a_ty); - a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len)); + a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @as(u32, @intCast(max_len))); } else { const undef = try sema.addConstUndef(b_ty); - b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len)); + b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @as(u32, @intCast(max_len))); } } @@ -22562,7 +22562,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C .Vector, .Array => pred_ty.arrayLen(mod), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), }; - const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); + const 
vec_len = @as(u32, @intCast(try sema.usizeCast(block, pred_src, vec_len_u64))); const bool_vec_ty = try mod.vectorType(.{ .len = vec_len, @@ -22930,7 +22930,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) |*resolved, i| { - resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); + resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @as(u32, @intCast(i)), args_ty); } const callee_ty = sema.typeOf(func); @@ -23048,7 +23048,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr .ty = try sema.addType(result_ptr), .payload = try block.sema.addExtra(Air.FieldParentPtr{ .field_ptr = casted_field_ptr, - .field_index = @intCast(u32, field_index), + .field_index = @as(u32, @intCast(field_index)), }), } }, }); @@ -23684,7 +23684,7 @@ fn zirVarExtended( const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; @@ -23699,7 +23699,7 @@ fn zirVarExtended( assert(!small.has_align); const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: { - const init_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const init_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk try sema.resolveInst(init_ref); } else .none; @@ -23776,7 +23776,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(mod)); + const alignment = @as(u32, 
@intCast(val.toUnsignedInt(mod))); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk .none; @@ -23784,7 +23784,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :blk Alignment.fromNonzeroByteUnits(alignment); } } else if (extra.data.bits.has_align_ref) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const align_tv = sema.resolveInstConst(block, align_src, align_ref, "alignment must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23792,7 +23792,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod)); + const alignment = @as(u32, @intCast(align_tv.val.toUnsignedInt(mod))); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk .none; @@ -23814,7 +23814,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { - const addrspace_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, "addrespace must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23838,7 +23838,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) }; } else if (extra.data.bits.has_section_ref) blk: { - const section_ref = @enumFromInt(Zir.Inst.Ref, 
sema.code.extra[extra_index]); + const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23862,7 +23862,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { - const cc_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, "calling convention must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23886,7 +23886,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { - const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]); + const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { @@ -23995,7 +23995,7 @@ fn zirWasmMemorySize( return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known")); + const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known"))); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = .wasm_memory_size, 
@@ -24020,7 +24020,7 @@ fn zirWasmMemoryGrow( return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known")); + const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known"))); const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src); try sema.requireRuntimeBlock(block, builtin_src, null); @@ -24060,7 +24060,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), + .locality = @as(u2, @intCast(locality_val.toUnsignedInt(mod))), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -24259,7 +24259,7 @@ fn zirWorkItem( }, } - const dimension = @intCast(u32, try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known")); + const dimension = @as(u32, @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known"))); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ @@ -24814,7 +24814,7 @@ fn addSafetyCheckExtra( fail_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 3); - const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const cond_br_inst = block_inst + 1; const br_inst = cond_br_inst + 1; sema.air_instructions.appendAssumeCapacity(.{ @@ -24834,7 +24834,7 @@ fn addSafetyCheckExtra( .operand = ok, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = 1, - .else_body_len = @intCast(u32, 
fail_block.instructions.items.len), + .else_body_len = @as(u32, @intCast(fail_block.instructions.items.len)), }), } }, }); @@ -25210,7 +25210,7 @@ fn fieldVal( const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); return sema.addConstant( try mod.enumValueFieldIndex(enum_ty, field_index), ); @@ -25226,7 +25226,7 @@ fn fieldVal( } const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const enum_val = try mod.enumValueFieldIndex(child_type, field_index); return sema.addConstant(enum_val); }, @@ -25438,7 +25438,7 @@ fn fieldPtr( const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { - const field_index_u32 = @intCast(u32, field_index); + const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -25459,7 +25459,7 @@ fn fieldPtr( const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; - const field_index_u32 = @intCast(u32, field_index); + const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -25544,7 +25544,7 @@ fn fieldCallBind( if (mod.typeToStruct(struct_ty)) |struct_obj| { const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; - const 
field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); @@ -25559,7 +25559,7 @@ fn fieldCallBind( } else { const max = struct_ty.structFieldCount(mod); for (0..max) |i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (field_name == struct_ty.structFieldName(i, mod)) { return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } @@ -25570,7 +25570,7 @@ fn fieldCallBind( const union_ty = try sema.resolveTypeFields(concrete_ty); const fields = union_ty.unionFields(mod); const field_index_usize = fields.getIndex(field_name) orelse break :find_field; - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); @@ -25792,7 +25792,7 @@ fn structFieldPtr( const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_big); + const field_index = @as(u32, @intCast(field_index_big)); return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing); } @@ -25838,7 +25838,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.packed_offset.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(mod)); + running_bits += @as(u16, @intCast(f.ty.bitSize(mod))); } ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8; @@ -25868,7 +25868,7 @@ fn structFieldPtrByIndex( const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = 
ptr_ty_data.packed_offset.bit_offset / 8; - const new_align = @enumFromInt(Alignment, @ctz(byte_offset | parent_align)); + const new_align = @as(Alignment, @enumFromInt(@ctz(byte_offset | parent_align))); assert(new_align != .none); ptr_ty_data.flags.alignment = new_align; ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 }; @@ -25923,7 +25923,7 @@ fn structFieldVal( const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { @@ -26058,7 +26058,7 @@ fn unionFieldPtr( .address_space = union_ptr_ty.ptrAddressSpace(mod), }, }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?)); if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { @@ -26146,7 +26146,7 @@ fn unionFieldVal( const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); + const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?)); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); @@ -26226,7 +26226,7 @@ fn elemPtr( .Struct => { // Tuple field access. 
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -26261,7 +26261,7 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const result_ty = try sema.elemPtrType(indexable_ty, index); const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(elem_ptr); @@ -26280,7 +26280,7 @@ fn elemPtrOneLayerOnly( .Struct => { assert(child_ty.isTuple(mod)); const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -26318,7 +26318,7 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); @@ -26355,7 +26355,7 @@ fn elemVal( .Struct => { // Tuple field access. 
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(mod)); + const index = @as(u32, @intCast(index_val.toUnsignedInt(mod))); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -26516,7 +26516,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(s); @@ -26532,7 +26532,7 @@ fn elemValArray( return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_val); } @@ -26644,7 +26644,7 @@ fn elemValSlice( return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const index = @as(usize, @intCast(index_val.toUnsignedInt(mod))); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); @@ -27287,7 +27287,7 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); }; return sema.addConstant( - try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)), + try mod.enumValueFieldIndex(dest_ty, @as(u32, @intCast(field_index))), ); }, .Union => blk: { @@ -27692,8 +27692,8 @@ const InMemoryCoercionResult = union(enum) { var index: u6 = 0; var actual_noalias = false; while (true) : 
(index += 1) { - const actual = @truncate(u1, param.actual >> index); - const wanted = @truncate(u1, param.wanted >> index); + const actual = @as(u1, @truncate(param.actual >> index)); + const wanted = @as(u1, @truncate(param.wanted >> index)); if (actual != wanted) { actual_noalias = actual == 1; break; @@ -28218,7 +28218,7 @@ fn coerceInMemoryAllowedFns( const dest_param_ty = dest_info.param_types[param_i].toType(); const src_param_ty = src_info.param_types[param_i].toType(); - const param_i_small = @intCast(u5, param_i); + const param_i_small = @as(u5, @intCast(param_i)); if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = param_i, @@ -28832,7 +28832,7 @@ fn beginComptimePtrMutation( // bytes.len may be one greater than dest_len because of the case when // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + const elems = try arena.alloc(Value, @as(usize, @intCast(dest_len))); for (elems, 0..) 
|*elem, i| { elem.* = try mod.intValue(elem_ty, bytes[i]); } @@ -28844,7 +28844,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28872,7 +28872,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28883,7 +28883,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], + &val_ptr.castTag(.aggregate).?.data[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ), @@ -28909,7 +28909,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[@intCast(usize, elem_ptr.index)], + &elems[@as(usize, @intCast(elem_ptr.index))], ptr_elem_ty, parent.mut_decl, ); @@ -28964,7 +28964,7 @@ fn beginComptimePtrMutation( }, .field => |field_ptr| { const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); switch (parent.pointee) { @@ -29401,12 +29401,12 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), + .val = try array_tv.val.elemValue(mod, @as(usize, @intCast(elem_ptr.index))), }; break :blk deref; }, .field => |field_ptr| blk: { - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); @@ -29990,7 +29990,7 @@ fn coerceTupleToArray( var runtime_src: ?LazySrcLoc = null; 
for (element_vals, element_refs, 0..) |*val, *ref, i_usize| { - const i = @intCast(u32, i_usize); + const i = @as(u32, @intCast(i_usize)); if (i_usize == inst_len) { const sentinel_val = dest_ty.sentinel(mod).?; val.* = sentinel_val.toIntern(); @@ -30101,7 +30101,7 @@ fn coerceTupleToStruct( else => unreachable, }; for (0..field_count) |field_index_usize| { - const field_i = @intCast(u32, field_index_usize); + const field_i = @as(u32, @intCast(field_index_usize)); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { @@ -30217,7 +30217,7 @@ fn coerceTupleToTuple( var runtime_src: ?LazySrcLoc = null; for (0..dest_field_count) |field_index_usize| { - const field_i = @intCast(u32, field_index_usize); + const field_i = @as(u32, @intCast(field_index_usize)); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { @@ -31532,7 +31532,7 @@ fn compareIntsOnlyPossibleResult( const ty = try mod.intType( if (is_negative) .signed else .unsigned, - @intCast(u16, req_bits), + @as(u16, @intCast(req_bits)), ); const pop_count = lhs_val.popCount(ty, mod); @@ -32294,7 +32294,7 @@ fn resolvePeerTypesInner( }; return .{ .success = try mod.vectorType(.{ - .len = @intCast(u32, len.?), + .len = @as(u32, @intCast(len.?)), .child = child_ty.toIntern(), }) }; }, @@ -33402,7 +33402,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { for (struct_obj.fields.values(), 0..) 
|field, i| { optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) - @intCast(u32, i) + @as(u32, @intCast(i)) else Module.Struct.omitted_field; } @@ -33443,7 +33443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); if (small.has_backing_int) { var extra_index: usize = extended.operand; @@ -33497,7 +33497,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 }; const backing_int_ty = blk: { if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref); } else { const body = zir.extra[extra_index..][0..backing_int_body_len]; @@ -33543,7 +33543,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum))); } } @@ -34178,7 +34178,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); - const small = @bitCast(Zir.Inst.StructDecl.Small, 
extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src = LazySrcLoc.nodeOffset(0); @@ -34288,13 +34288,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_init = @truncate(u1, cur_bit_bag) != 0; + const has_init = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_comptime = @truncate(u1, cur_bit_bag) != 0; + const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; var field_name_zir: ?[:0]const u8 = null; @@ -34309,7 +34309,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (has_type_body) { fields[field_i].type_body_len = zir.extra[extra_index]; } else { - fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); } extra_index += 1; @@ -34529,14 +34529,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src = LazySrcLoc.nodeOffset(0); extra_index += @intFromBool(small.has_src_node); const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { - const ty_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const ty_ref 
= @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk ty_ref; } else .none; @@ -34684,13 +34684,13 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_tag = @truncate(u1, cur_bit_bag) != 0; + const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -34701,19 +34701,19 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += 1; const field_type_ref: Zir.Inst.Ref = if (has_type) blk: { - const field_type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk field_type_ref; } else .none; const align_ref: Zir.Inst.Ref = if (has_align) blk: { - const align_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk align_ref; } else .none; const tag_ref: Air.Inst.Ref = if (has_tag) blk: { - const tag_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]); + const tag_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk try sema.resolveInst(tag_ref); } else .none; @@ -35427,12 +35427,12 @@ pub fn getTmpAir(sema: Sema) Air { pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { if (@intFromEnum(ty.toIntern()) < Air.ref_start_index) - return @enumFromInt(Air.Inst.Ref, @intFromEnum(ty.toIntern())); + return @as(Air.Inst.Ref, 
@enumFromInt(@intFromEnum(ty.toIntern()))); try sema.air_instructions.append(sema.gpa, .{ .tag = .interned, .data = .{ .interned = ty.toIntern() }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -35446,12 +35446,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, val: Value) SemaError!Air.Inst.Ref { if (@intFromEnum(val.toIntern()) < Air.ref_start_index) - return @enumFromInt(Air.Inst.Ref, @intFromEnum(val.toIntern())); + return @as(Air.Inst.Ref, @enumFromInt(@intFromEnum(val.toIntern()))); try sema.air_instructions.append(sema.gpa, .{ .tag = .interned, .data = .{ .interned = val.toIntern() }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { @@ -35462,12 +35462,12 @@ pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, sema.air_extra.items.len); + const result = @as(u32, @intCast(sema.air_extra.items.len)); inline for (fields) |field| { sema.air_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Air.Inst.Ref => @intFromEnum(@field(extra, field.name)), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), InternPool.Index => @intFromEnum(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); @@ -35476,7 +35476,7 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { } fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { - const coerced = 
@ptrCast([]const u32, refs); + const coerced = @as([]const u32, @ptrCast(refs)); sema.air_extra.appendSliceAssumeCapacity(coerced); } @@ -35916,10 +35916,10 @@ fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - return @intCast(u32, if (field.ty.isNoReturn(sema.mod)) + return @as(u32, @intCast(if (field.ty.isNoReturn(sema.mod)) 0 else - field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty)); + field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty))); } /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. @@ -35951,7 +35951,7 @@ fn unionFieldIndex( const union_obj = mod.typeToUnion(union_ty).?; const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); - return @intCast(u32, field_index_usize); + return @as(u32, @intCast(field_index_usize)); } fn structFieldIndex( @@ -35969,7 +35969,7 @@ fn structFieldIndex( const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return @intCast(u32, field_index_usize); + return @as(u32, @intCast(field_index_usize)); } } @@ -35983,12 +35983,12 @@ fn anonStructFieldIndex( const mod = sema.mod; switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { - if (name == field_name) return @intCast(u32, i); + if (name == field_name) return @as(u32, @intCast(i)); }, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { for (struct_obj.fields.keys(), 0..) 
|name, i| { if (name == field_name) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } }, @@ -36586,9 +36586,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { if (!is_packed) break :blk .{}; break :blk .{ - .host_size = @intCast(u16, parent_ty.arrayLen(mod)), - .alignment = @intCast(u32, parent_ty.abiAlignment(mod)), - .vector_index = if (offset) |some| @enumFromInt(VI, some) else .runtime, + .host_size = @as(u16, @intCast(parent_ty.arrayLen(mod))), + .alignment = @as(u32, @intCast(parent_ty.abiAlignment(mod))), + .vector_index = if (offset) |some| @as(VI, @enumFromInt(some)) else .runtime, }; } else .{}; @@ -36607,10 +36607,10 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { // The resulting pointer is aligned to the lcd between the offset (an // arbitrary number) and the alignment factor (always a power of two, // non zero). - const new_align = @enumFromInt(Alignment, @min( + const new_align = @as(Alignment, @enumFromInt(@min( @ctz(addend), @intFromEnum(ptr_info.flags.alignment), - )); + ))); assert(new_align != .none); break :a new_align; }; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 1e8ab0fd87ea..5abcd7b2807b 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -250,7 +250,7 @@ pub fn print( }, .empty_enum_value => return writer.writeAll("(empty enum value)"), .float => |float| switch (float.storage) { - inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}), + inline else => |x| return writer.print("{d}", .{@as(f64, @floatCast(x))}), }, .ptr => |ptr| { if (ptr.addr == .int) { @@ -273,7 +273,7 @@ pub fn print( for (buf[0..max_len], 0..) 
|*c, i| { const elem = try val.elemValue(mod, i); if (elem.isUndef(mod)) break :str; - c.* = @intCast(u8, elem.toUnsignedInt(mod)); + c.* = @as(u8, @intCast(elem.toUnsignedInt(mod))); } const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); @@ -352,11 +352,11 @@ pub fn print( if (container_ty.isTuple(mod)) { try writer.print("[{d}]", .{field.index}); } - const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod); + const field_name = container_ty.structFieldName(@as(usize, @intCast(field.index)), mod); try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Union => { - const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; + const field_name = container_ty.unionFields(mod).keys()[@as(usize, @intCast(field.index))]; try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Pointer => { diff --git a/src/Zir.zig b/src/Zir.zig index 45ee755d6bdb..a51290acebf4 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -74,12 +74,12 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => code.extra[i], - Inst.Ref => @enumFromInt(Inst.Ref, code.extra[i]), - i32 => @bitCast(i32, code.extra[i]), - Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]), - Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]), - Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]), - Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]), + Inst.Ref => @as(Inst.Ref, @enumFromInt(code.extra[i])), + i32 => @as(i32, @bitCast(code.extra[i])), + Inst.Call.Flags => @as(Inst.Call.Flags, @bitCast(code.extra[i])), + Inst.BuiltinCall.Flags => @as(Inst.BuiltinCall.Flags, @bitCast(code.extra[i])), + Inst.SwitchBlock.Bits => @as(Inst.SwitchBlock.Bits, @bitCast(code.extra[i])), + Inst.FuncFancy.Bits => 
@as(Inst.FuncFancy.Bits, @bitCast(code.extra[i])), else => @compileError("bad field type"), }; i += 1; @@ -101,7 +101,7 @@ pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 { pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { const raw_slice = code.extra[start..][0..len]; - return @ptrCast([]Inst.Ref, raw_slice); + return @as([]Inst.Ref, @ptrCast(raw_slice)); } pub fn hasCompileErrors(code: Zir) bool { @@ -2992,7 +2992,7 @@ pub const Inst = struct { (@as(u128, self.piece1) << 32) | (@as(u128, self.piece2) << 64) | (@as(u128, self.piece3) << 96); - return @bitCast(f128, int_bits); + return @as(f128, @bitCast(int_bits)); } }; @@ -3228,15 +3228,15 @@ pub const DeclIterator = struct { } it.decl_i += 1; - const flags = @truncate(u4, it.cur_bit_bag); + const flags = @as(u4, @truncate(it.cur_bit_bag)); it.cur_bit_bag >>= 4; - const sub_index = @intCast(u32, it.extra_index); + const sub_index = @as(u32, @intCast(it.extra_index)); it.extra_index += 5; // src_hash(4) + line(1) const name = it.zir.nullTerminatedString(it.zir.extra[it.extra_index]); it.extra_index += 3; // name(1) + value(1) + doc_comment(1) - it.extra_index += @truncate(u1, flags >> 2); - it.extra_index += @truncate(u1, flags >> 3); + it.extra_index += @as(u1, @truncate(flags >> 2)); + it.extra_index += @as(u1, @truncate(flags >> 3)); return Item{ .sub_index = sub_index, @@ -3258,7 +3258,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { const extended = datas[decl_inst].extended; switch (extended.opcode) { .struct_decl => { - const small = @bitCast(Inst.StructDecl.Small, extended.small); + const small = @as(Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); @@ -3281,7 +3281,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .enum_decl => { - const 
small = @bitCast(Inst.EnumDecl.Small, extended.small); + const small = @as(Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); @@ -3296,7 +3296,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .union_decl => { - const small = @bitCast(Inst.UnionDecl.Small, extended.small); + const small = @as(Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); @@ -3311,7 +3311,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { return declIteratorInner(zir, extra_index, decls_len); }, .opaque_decl => { - const small = @bitCast(Inst.OpaqueDecl.Small, extended.small); + const small = @as(Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); const decls_len = if (small.has_decls_len) decls_len: { @@ -3507,7 +3507,7 @@ fn findDeclsSwitch( const special_prong = extra.data.bits.specialProng(); if (special_prong != .none) { - const body_len = @truncate(u31, zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const body = zir.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -3520,7 +3520,7 @@ fn findDeclsSwitch( var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; - const body_len = @truncate(u31, zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const body = zir.extra[extra_index..][0..body_len]; extra_index += body_len; @@ -3535,7 +3535,7 @@ fn findDeclsSwitch( extra_index += 1; const ranges_len = zir.extra[extra_index]; extra_index += 1; - const body_len = @truncate(u31, 
zir.extra[extra_index]); + const body_len = @as(u31, @truncate(zir.extra[extra_index])); extra_index += 1; const items = zir.refSlice(extra_index, items_len); extra_index += items_len; @@ -3617,7 +3617,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { ret_ty_ref = .void_type; }, 1 => { - ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]); + ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; }, else => { @@ -3671,7 +3671,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { ret_ty_body = zir.extra[extra_index..][0..body_len]; extra_index += ret_ty_body.len; } else if (extra.data.bits.has_ret_ty_ref) { - ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]); + ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; } @@ -3715,7 +3715,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Inst.Index) Inst.Ref { - return @enumFromInt(Inst.Ref, ref_start_index + inst); + return @as(Inst.Ref, @enumFromInt(ref_start_index + inst)); } pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5080a0451a7c..1d09fcd1cd90 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -187,8 +187,8 @@ const DbgInfoReloc = struct { .stack_argument_offset, => |offset| blk: { const adjusted_offset = switch (reloc.mcv) { - .stack_offset => -@intCast(i32, offset), - .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset), + .stack_offset => -@as(i32, @intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -224,8 +224,8 @@ const DbgInfoReloc = struct { const adjusted_offset = switch (reloc.mcv) { .ptr_stack_offset, .stack_offset, - => -@intCast(i32, offset), - .stack_argument_offset => 
@intCast(i32, function.saved_regs_stack_space + offset), + => -@as(i32, @intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ @@ -440,7 +440,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -460,11 +460,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -524,7 +524,7 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -547,7 +547,7 @@ fn gen(self: *Self) !void { self.saved_regs_stack_space = 16; inline for (callee_preserved_regs) |reg| { if (self.register_manager.isRegAllocated(reg)) { - saved_regs |= @as(u32, 1) << @intCast(u5, reg.id()); + saved_regs |= @as(u32, 1) << @as(u5, @intCast(reg.id())); self.saved_regs_stack_space += 8; } } @@ -597,14 +597,14 @@ fn gen(self: *Self) !void { for (self.exitlude_jump_relocs.items) |jmp_reloc| { 
self.mir_instructions.set(jmp_reloc, .{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) }, }); } // add sp, sp, #stack_size _ = try self.addInst(.{ .tag = .add_immediate, - .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @intCast(u12, stack_size) } }, + .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @as(u12, @intCast(stack_size)) } }, }); // @@ -948,15 +948,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1232,7 +1232,7 @@ fn truncRegister( .rd = dest_reg, .rn = operand_reg, .lsb = 0, - .width = @intCast(u6, int_bits), + .width = @as(u6, @intCast(int_bits)), } }, }); }, @@ -1877,7 +1877,7 @@ fn binOpImmediate( => .{ .rr_imm12_sh = .{ .rd = dest_reg, .rn = lhs_reg, - .imm12 = @intCast(u12, rhs_immediate), + .imm12 = @as(u12, @intCast(rhs_immediate)), } }, .lsl_immediate, .asr_immediate, @@ -1885,7 +1885,7 @@ fn binOpImmediate( => .{ .rr_shift = .{ .rd = dest_reg, .rn = lhs_reg, - .shift = @intCast(u6, rhs_immediate), + .shift = @as(u6, @intCast(rhs_immediate)), } }, else => unreachable, }; @@ -2526,9 +2526,9 @@ fn airOverflow(self: *Self, 
inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -2654,9 +2654,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -2777,7 +2777,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } }, }); - const shift: u6 = @intCast(u6, @as(u7, 64) - @intCast(u7, int_info.bits)); + const shift: u6 = @as(u6, @intCast(@as(u7, 64) - @as(u7, @intCast(int_info.bits)))); if (shift > 0) { // lsl dest_high, dest, #shift _ = try self.addInst(.{ @@ -2837,7 +2837,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_high_reg, .rn = dest_reg, - .shift = @intCast(u6, int_info.bits), + .shift = @as(u6, @intCast(int_info.bits)), } }, }); @@ -2878,9 +2878,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, 
tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -2917,7 +2917,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_reg, .rn = lhs_reg, - .shift = @intCast(u6, imm), + .shift = @as(u6, @intCast(imm)), } }, }); @@ -2932,7 +2932,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = reconstructed_reg, .rn = dest_reg, - .shift = @intCast(u6, imm), + .shift = @as(u6, @intCast(imm)), } }, }); } else { @@ -3072,7 +3072,7 @@ fn errUnionErr( return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3094,7 +3094,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; + const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -3103,8 +3103,8 @@ fn errUnionErr( // Set both registers to the X variant to get the full width .rd = dest_reg.toX(), .rn = operand_reg.toX(), - .lsb = @intCast(u6, err_bit_offset), - .width = @intCast(u7, err_bit_size), + .lsb = @as(u6, @intCast(err_bit_offset)), + .width = @as(u7, @intCast(err_bit_size)), }, }, }); @@ -3152,7 +3152,7 @@ fn errUnionPayload( return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = 
@as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3174,7 +3174,7 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; + const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, @@ -3183,8 +3183,8 @@ fn errUnionPayload( // Set both registers to the X variant to get the full width .rd = dest_reg.toX(), .rn = operand_reg.toX(), - .lsb = @intCast(u5, payload_bit_offset), - .width = @intCast(u6, payload_bit_size), + .lsb = @as(u5, @intCast(payload_bit_offset)), + .width = @as(u6, @intCast(payload_bit_size)), }, }, }); @@ -3283,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .register = reg }; } - const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod)); + const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod))); const optional_abi_align = optional_ty.abiAlignment(mod); - const offset = @intCast(u32, payload_ty.abiSize(mod)); + const offset = @as(u32, @intCast(payload_ty.abiSize(mod))); const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst); try self.genSetStack(payload_ty, stack_offset, operand); @@ -3308,13 +3308,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = 
errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -3332,13 +3332,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -3454,7 +3454,7 @@ fn ptrElemVal( ) !MCValue { const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); // TODO optimize for elem_sizes of 1, 2, 4, 8 switch (elem_size) { @@ -3716,7 +3716,7 @@ fn genInlineMemcpy( _ = try self.addInst(.{ .tag = .b_cond, .data = .{ .inst_cond = .{ - .inst = @intCast(u32, self.mir_instructions.len + 5), + .inst = @as(u32, @intCast(self.mir_instructions.len + 
5)), .cond = .ge, } }, }); @@ -3754,7 +3754,7 @@ fn genInlineMemcpy( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) }, }); // end: @@ -3824,7 +3824,7 @@ fn genInlineMemsetCode( _ = try self.addInst(.{ .tag = .b_cond, .data = .{ .inst_cond = .{ - .inst = @intCast(u32, self.mir_instructions.len + 4), + .inst = @as(u32, @intCast(self.mir_instructions.len + 4)), .cond = .ge, } }, }); @@ -3852,7 +3852,7 @@ fn genInlineMemsetCode( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) }, }); // end: @@ -4002,7 +4002,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type } }, }); }, - .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .linker_load => |load_struct| { const tag: Mir.Inst.Tag = switch (load_struct.type) { .got => .load_memory_ptr_got, @@ -4092,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4117,7 +4117,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index, mod); - const struct_field_offset = @intCast(u32, 
struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .dead, .unreach => unreachable, @@ -4169,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod))); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -4243,7 +4243,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; @@ -4269,8 +4269,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); + const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod))); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ret_ptr_reg = self.registerAlias(.x0, Type.usize); @@ -4314,7 +4314,7 @@ fn airCall(self: *Self, inst: 
Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); @@ -4473,7 +4473,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4554,7 +4554,7 @@ fn cmp( .tag = .cmp_immediate, .data = .{ .r_imm12_sh = .{ .rn = lhs_reg, - .imm12 = @intCast(u12, rhs_immediate.?), + .imm12 = @as(u12, @intCast(rhs_immediate.?)), } }, }); } else { @@ -4696,7 +4696,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -4833,7 +4833,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :blk .{ .ty = operand_ty, .bind = operand_bind }; - const offset = @intCast(u32, payload_ty.abiSize(mod)); + const offset = @as(u32, @intCast(payload_ty.abiSize(mod))); const operand_mcv = try operand_bind.resolveToMcv(self); const new_mcv: MCValue = switch 
(operand_mcv) { .register => |source_reg| new: { @@ -4841,7 +4841,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { const raw_reg = try self.register_manager.allocReg(null, gp); const dest_reg = raw_reg.toX(); - const shift = @intCast(u6, offset * 8); + const shift = @as(u6, @intCast(offset * 8)); if (shift == 0) { try self.genSetReg(payload_ty, dest_reg, operand_mcv); } else { @@ -5026,7 +5026,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const start_index = @intCast(u32, self.mir_instructions.len); + const start_index = @as(u32, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start_index); @@ -5091,7 +5091,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); assert(items.len > 0); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -5209,9 +5209,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, 
self.mir_instructions.len), + .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), + .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), + .b => self.mir_instructions.items(.data)[inst].inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -5262,12 +5262,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -5401,7 +5401,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
@@ -5460,7 +5460,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); @@ -5589,7 +5589,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .ldr_ptr_stack, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5605,13 +5605,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .immediate => |x| { _ = try self.addInst(.{ .tag = .movz, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } }, }); if (x & 0x0000_0000_ffff_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } }, }); } @@ -5619,13 +5619,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (x & 0x0000_ffff_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } }, }); } if (x & 0xffff_0000_0000_0000 != 0) { _ = try self.addInst(.{ .tag = .movk, - .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } }, + .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } }, }); } } @@ -5696,7 
+5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5720,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = tag, .data = .{ .load_store_stack = .{ .rt = reg, - .offset = @intCast(u32, off), + .offset = @as(u32, @intCast(off)), } }, }); }, @@ -5733,7 +5733,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5840,7 +5840,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I } }, }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .linker_load => |load_struct| { const tag: Mir.Inst.Tag = switch (load_struct.type) { .got => .load_memory_ptr_got, @@ -5937,7 +5937,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -6058,7 +6058,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = 
@ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch}); @@ -6105,7 +6105,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the @@ -6247,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6259,7 +6259,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (fn_info.param_types, 0..) 
|ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6305,7 +6305,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6325,7 +6325,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 238a63c92112..8cf238613891 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -81,7 +81,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add_immediate => try emit.mirAddSubtractImmediate(inst), .adds_immediate => try emit.mirAddSubtractImmediate(inst), @@ -324,7 +324,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -369,7 +369,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -384,7 +384,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -403,7 +403,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { @@ -434,7 +434,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -451,13 +451,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try 
@import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; @@ -548,13 +548,13 @@ fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond; - const offset = @intCast(i64, emit.code_offset_mapping.get(inst_cond.inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(inst_cond.inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirConditionalBranchImmediate: {} offset={}", .{ inst, offset }); switch (branch_type) { .b_cond => switch (tag) { - .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @intCast(i21, offset))), + .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @as(i21, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -572,14 +572,14 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void { emit.mir.instructions.items(.tag)[target_inst], }); - const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) 
- @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirBranch: {} offset={}", .{ inst, offset }); switch (branch_type) { .unconditional_branch_immediate => switch (tag) { - .b => try emit.writeInstruction(Instruction.b(@intCast(i28, offset))), - .bl => try emit.writeInstruction(Instruction.bl(@intCast(i28, offset))), + .b => try emit.writeInstruction(Instruction.b(@as(i28, @intCast(offset)))), + .bl => try emit.writeInstruction(Instruction.bl(@as(i28, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -590,13 +590,13 @@ fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const r_inst = emit.mir.instructions.items(.data)[inst].r_inst; - const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(r_inst.inst).?)) - @as(i64, @intCast(emit.code.items.len)); const branch_type = emit.branch_types.get(inst).?; log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset }); switch (branch_type) { .cbz => switch (tag) { - .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))), + .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @as(i21, @intCast(offset)))), else => unreachable, }, else => unreachable, @@ -662,7 +662,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { const relocation = emit.mir.instructions.items(.data)[inst].relocation; const offset = blk: { - const offset = @intCast(u32, emit.code.items.len); + const offset = @as(u32, @intCast(emit.code.items.len)); // bl try emit.writeInstruction(Instruction.bl(0)); break :blk offset; @@ -837,11 +837,11 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = 
emit.mir.instructions.items(.tag)[inst]; const payload = emit.mir.instructions.items(.data)[inst].payload; const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data; - const reg = @enumFromInt(Register, data.register); + const reg = @as(Register, @enumFromInt(data.register)); // PC-relative displacement to the entry in memory. // adrp - const offset = @intCast(u32, emit.code.items.len); + const offset = @as(u32, @intCast(emit.code.items.len)); try emit.writeInstruction(Instruction.adrp(reg.toX(), 0)); switch (tag) { @@ -1220,7 +1220,7 @@ fn mirNop(emit: *Emit) !void { } fn regListIsSet(reg_list: u32, reg: Register) bool { - return reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0; + return reg_list & @as(u32, 1) << @as(u5, @intCast(reg.id())) != 0; } fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { @@ -1245,7 +1245,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { var count: u6 = 0; var other_reg: ?Register = null; while (i > 0) : (i -= 1) { - const reg = @enumFromInt(Register, i - 1); + const reg = @as(Register, @enumFromInt(i - 1)); if (regListIsSet(reg_list, reg)) { if (count == 0 and odd_number_of_regs) { try emit.writeInstruction(Instruction.ldr( @@ -1274,7 +1274,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void { var count: u6 = 0; var other_reg: ?Register = null; while (i < 32) : (i += 1) { - const reg = @enumFromInt(Register, i); + const reg = @as(Register, @enumFromInt(i)); if (regListIsSet(reg_list, reg)) { if (count == number_of_regs - 1 and odd_number_of_regs) { try emit.writeInstruction(Instruction.str( diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig index cc478c874a7c..6c0a1ec5b48c 100644 --- a/src/arch/aarch64/Mir.zig +++ b/src/arch/aarch64/Mir.zig @@ -507,7 +507,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, 
mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig index 3446d6995091..6e4508fb0e4c 100644 --- a/src/arch/aarch64/bits.zig +++ b/src/arch/aarch64/bits.zig @@ -80,34 +80,34 @@ pub const Register = enum(u8) { pub fn id(self: Register) u6 { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.x0)), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.w0)), + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))), + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))), @intFromEnum(Register.sp) => 32, @intFromEnum(Register.wsp) => 32, - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.q0) + 33), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.d0) + 33), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.s0) + 33), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.h0) + 33), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.b0) + 33), + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)), + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)), + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)), + 
@intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)), + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)), else => unreachable, }; } pub fn enc(self: Register) u5 { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.x0)), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.w0)), + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))), + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))), @intFromEnum(Register.sp) => 31, @intFromEnum(Register.wsp) => 31, - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.q0)), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.d0)), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.s0)), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.h0)), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.b0)), + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))), + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))), + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))), + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, 
@intCast(@intFromEnum(self) - @intFromEnum(Register.h0))), + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))), else => unreachable, }; } @@ -133,13 +133,13 @@ pub const Register = enum(u8) { /// Convert from a general-purpose register to its 64 bit alias. pub fn toX(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt( + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)), ), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt( + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)), ), else => unreachable, }; @@ -148,13 +148,13 @@ pub const Register = enum(u8) { /// Convert from a general-purpose register to its 32 bit alias. 
pub fn toW(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt( + @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)), ), - @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt( + @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)), ), else => unreachable, }; @@ -163,25 +163,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 128 bit alias. pub fn toQ(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + 
@intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)), ), else => unreachable, }; @@ -190,25 +190,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 64 bit alias. pub fn toD(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0), + 
@enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)), ), else => unreachable, }; @@ -217,25 +217,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 32 bit alias. pub fn toS(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + 
@intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)), ), else => unreachable, }; @@ -244,25 +244,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 16 bit alias. pub fn toH(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0), + 
@enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)), ), else => unreachable, }; @@ -271,25 +271,25 @@ pub const Register = enum(u8) { /// Convert from a floating-point register to its 8 bit alias. pub fn toB(self: Register) Register { return switch (@intFromEnum(self)) { - @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt( + @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt( + @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt( + @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt( + @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)), ), - @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt( + @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as( Register, - @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0), + @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)), ), else => unreachable, }; @@ -612,27 +612,27 @@ pub const Instruction = 
union(enum) { pub fn toU32(self: Instruction) u32 { return switch (self) { - .move_wide_immediate => |v| @bitCast(u32, v), - .pc_relative_address => |v| @bitCast(u32, v), - .load_store_register => |v| @bitCast(u32, v), - .load_store_register_pair => |v| @bitCast(u32, v), - .load_literal => |v| @bitCast(u32, v), - .exception_generation => |v| @bitCast(u32, v), - .unconditional_branch_register => |v| @bitCast(u32, v), - .unconditional_branch_immediate => |v| @bitCast(u32, v), - .no_operation => |v| @bitCast(u32, v), - .logical_shifted_register => |v| @bitCast(u32, v), - .add_subtract_immediate => |v| @bitCast(u32, v), - .logical_immediate => |v| @bitCast(u32, v), - .bitfield => |v| @bitCast(u32, v), - .add_subtract_shifted_register => |v| @bitCast(u32, v), - .add_subtract_extended_register => |v| @bitCast(u32, v), + .move_wide_immediate => |v| @as(u32, @bitCast(v)), + .pc_relative_address => |v| @as(u32, @bitCast(v)), + .load_store_register => |v| @as(u32, @bitCast(v)), + .load_store_register_pair => |v| @as(u32, @bitCast(v)), + .load_literal => |v| @as(u32, @bitCast(v)), + .exception_generation => |v| @as(u32, @bitCast(v)), + .unconditional_branch_register => |v| @as(u32, @bitCast(v)), + .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)), + .no_operation => |v| @as(u32, @bitCast(v)), + .logical_shifted_register => |v| @as(u32, @bitCast(v)), + .add_subtract_immediate => |v| @as(u32, @bitCast(v)), + .logical_immediate => |v| @as(u32, @bitCast(v)), + .bitfield => |v| @as(u32, @bitCast(v)), + .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)), + .add_subtract_extended_register => |v| @as(u32, @bitCast(v)), // TODO once packed structs work, this can be refactored .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25), .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 
31), .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31, - .data_processing_3_source => |v| @bitCast(u32, v), - .data_processing_2_source => |v| @bitCast(u32, v), + .data_processing_3_source => |v| @as(u32, @bitCast(v)), + .data_processing_2_source => |v| @as(u32, @bitCast(v)), }; } @@ -650,7 +650,7 @@ pub const Instruction = union(enum) { .move_wide_immediate = .{ .rd = rd.enc(), .imm16 = imm16, - .hw = @intCast(u2, shift / 16), + .hw = @as(u2, @intCast(shift / 16)), .opc = opc, .sf = switch (rd.size()) { 32 => 0, @@ -663,12 +663,12 @@ pub const Instruction = union(enum) { fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction { assert(rd.size() == 64); - const imm21_u = @bitCast(u21, imm21); + const imm21_u = @as(u21, @bitCast(imm21)); return Instruction{ .pc_relative_address = .{ .rd = rd.enc(), - .immlo = @truncate(u2, imm21_u), - .immhi = @truncate(u19, imm21_u >> 2), + .immlo = @as(u2, @truncate(imm21_u)), + .immhi = @as(u19, @truncate(imm21_u >> 2)), .op = op, }, }; @@ -704,15 +704,15 @@ pub const Instruction = union(enum) { pub fn toU12(self: LoadStoreOffset) u12 { return switch (self) { .immediate => |imm_type| switch (imm_type) { - .post_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 1, - .pre_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 3, + .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1, + .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3, .unsigned => |v| v, }, .register => |r| switch (r.shift) { - .uxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 16 + 2050, - .lsl => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 24 + 2050, - .sxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 48 + 2050, - .sxtx => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 56 + 
2050, + .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050, + .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050, + .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050, + .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050, }, }; } @@ -894,7 +894,7 @@ pub const Instruction = union(enum) { switch (rt1.size()) { 32 => { assert(-256 <= offset and offset <= 252); - const imm7 = @truncate(u7, @bitCast(u9, offset >> 2)); + const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2)))); return Instruction{ .load_store_register_pair = .{ .rt1 = rt1.enc(), @@ -909,7 +909,7 @@ pub const Instruction = union(enum) { }, 64 => { assert(-512 <= offset and offset <= 504); - const imm7 = @truncate(u7, @bitCast(u9, offset >> 3)); + const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3)))); return Instruction{ .load_store_register_pair = .{ .rt1 = rt1.enc(), @@ -982,7 +982,7 @@ pub const Instruction = union(enum) { ) Instruction { return Instruction{ .unconditional_branch_immediate = .{ - .imm26 = @bitCast(u26, @intCast(i26, offset >> 2)), + .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))), .op = op, }, }; @@ -1188,7 +1188,7 @@ pub const Instruction = union(enum) { .conditional_branch = .{ .cond = @intFromEnum(cond), .o0 = o0, - .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)), + .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))), .o1 = o1, }, }; @@ -1204,7 +1204,7 @@ pub const Instruction = union(enum) { return Instruction{ .compare_and_branch = .{ .rt = rt.enc(), - .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)), + .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))), .op = op, .sf = switch (rt.size()) { 32 => 0b0, @@ -1609,12 +1609,12 @@ pub const Instruction = union(enum) { } pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const imms = @intCast(u6, rd.size() - 1); + 
const imms = @as(u6, @intCast(rd.size() - 1)); return sbfm(rd, rn, shift, imms); } pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction { - return sbfm(rd, rn, lsb, @intCast(u6, lsb + width - 1)); + return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1))); } pub fn sxtb(rd: Register, rn: Register) Instruction { @@ -1631,17 +1631,17 @@ pub const Instruction = union(enum) { } pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const size = @intCast(u6, rd.size() - 1); + const size = @as(u6, @intCast(rd.size() - 1)); return ubfm(rd, rn, size - shift + 1, size - shift); } pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction { - const imms = @intCast(u6, rd.size() - 1); + const imms = @as(u6, @intCast(rd.size() - 1)); return ubfm(rd, rn, shift, imms); } pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction { - return ubfm(rd, rn, lsb, @intCast(u6, lsb + width - 1)); + return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1))); } pub fn uxtb(rd: Register, rn: Register) Instruction { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 7ece4ba2e3a3..885a07ec6e4b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -266,8 +266,8 @@ const DbgInfoReloc = struct { .stack_argument_offset, => blk: { const adjusted_stack_offset = switch (reloc.mcv) { - .stack_offset => |offset| -@intCast(i32, offset), - .stack_argument_offset => |offset| @intCast(i32, function.saved_regs_stack_space + offset), + .stack_offset => |offset| -@as(i32, @intCast(offset)), + .stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -303,8 +303,8 @@ const DbgInfoReloc = struct { const adjusted_offset = switch (reloc.mcv) { .ptr_stack_offset, .stack_offset, - => -@intCast(i32, offset), - .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset), + => -@as(i32, 
@intCast(offset)), + .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)), else => unreachable, }; break :blk .{ .stack = .{ @@ -446,7 +446,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -466,11 +466,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -522,7 +522,7 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -588,7 +588,7 @@ fn gen(self: *Self) !void { for (self.exitlude_jump_relocs.items) |jmp_reloc| { self.mir_instructions.set(jmp_reloc, .{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) }, }); } @@ -934,15 +934,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = 
self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1201,7 +1201,7 @@ fn truncRegister( .rd = dest_reg, .rn = operand_reg, .lsb = 0, - .width = @intCast(u6, int_bits), + .width = @as(u6, @intCast(int_bits)), } }, }); } @@ -1591,9 +1591,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -1704,9 +1704,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); 
switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -1866,9 +1866,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -1915,7 +1915,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = dest_reg, .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))), } }, }); @@ -1927,7 +1927,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .rr_shift = .{ .rd = reconstructed_reg, .rm = dest_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))), } }, }); } else { @@ -2020,7 +2020,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, optional_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(optional_ty.abiSize(mod))); // Optional with a zero-bit payload type is just a boolean true if (abi_size == 1) { @@ -2049,7 +2049,7 @@ fn errUnionErr( return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, 
errUnionErrorOffset(payload_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2071,15 +2071,15 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; + const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, err_bit_offset), - .width = @intCast(u6, err_bit_size), + .lsb = @as(u5, @intCast(err_bit_offset)), + .width = @as(u6, @intCast(err_bit_size)), } }, }); @@ -2126,7 +2126,7 @@ fn errUnionPayload( return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2148,15 +2148,15 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; + const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, payload_bit_offset), - .width = @intCast(u6, payload_bit_size), + .lsb = @as(u5, @intCast(payload_bit_offset)), + .width = @as(u6, @intCast(payload_bit_size)), } }, }); @@ -2235,13 +2235,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, 
@intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); - const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); + const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst))); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2259,13 +2259,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const abi_align = error_union_ty.abiAlignment(mod); - const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); + const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst))); const payload_off = errUnionPayloadOffset(payload_ty, mod); const err_off = errUnionErrorOffset(payload_ty, mod); - try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); - try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); + try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); + try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2369,7 +2369,7 @@ fn ptrElemVal( ) !MCValue { const mod = self.bin_file.options.module.?; 
const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); switch (elem_size) { 1, 4 => { @@ -2480,7 +2480,7 @@ fn arrayElemVal( => { const ptr_to_mcv = switch (mcv) { .stack_offset => |off| MCValue{ .ptr_stack_offset = off }, - .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) }, + .memory => |addr| MCValue{ .immediate = @as(u32, @intCast(addr)) }, .stack_argument_offset => |off| blk: { const reg = try self.register_manager.allocReg(null, gp); @@ -2654,7 +2654,7 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); switch (ptr) { .none => unreachable, @@ -2759,7 +2759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; - const elem_size = @intCast(u32, value_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(value_ty.abiSize(mod))); switch (ptr) { .none => unreachable, @@ -2814,7 +2814,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -2882,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = 
ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2906,7 +2906,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { @@ -2970,15 +2970,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; + const field_bit_size = @as(u32, @intCast(struct_field_ty.abiSize(mod))) * 8; _ = try self.addInst(.{ .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, - .lsb = @intCast(u5, field_bit_offset), - .width = @intCast(u6, field_bit_size), + .lsb = @as(u5, @intCast(field_bit_offset)), + .width = @as(u6, @intCast(field_bit_size)), } }, }); @@ -3003,7 +3003,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod))); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3364,7 +3364,7 @@ fn binOpImmediate( => .{ 
.rr_shift = .{ .rd = dest_reg, .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)), + .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_immediate))), } }, else => unreachable, }; @@ -3895,7 +3895,7 @@ fn ptrArithmetic( .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type else => ptr_ty.childType(mod), }; - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -4022,7 +4022,7 @@ fn genInlineMemcpy( _ = try self.addInst(.{ .tag = .b, .cond = .ge, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 5)) }, }); // ldrb tmp, [src, count] @@ -4058,7 +4058,7 @@ fn genInlineMemcpy( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) }, }); // end: @@ -4126,7 +4126,7 @@ fn genInlineMemsetCode( _ = try self.addInst(.{ .tag = .b, .cond = .ge, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 4)) }, }); // strb val, [src, count] @@ -4152,7 +4152,7 @@ fn genInlineMemsetCode( // b loop _ = try self.addInst(.{ .tag = .b, - .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) }, + .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) }, }); // end: @@ -4216,7 +4216,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, 
@ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; @@ -4248,8 +4248,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); + const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod))); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ptr_ty = try mod.singleMutPtrType(ret_ty); @@ -4294,7 +4294,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO @@ -4425,7 +4425,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. 
const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4651,7 +4651,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -4956,7 +4956,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const start_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const start_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start_index); @@ -5021,7 +5021,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); assert(items.len > 0); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -5139,7 +5139,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .b => 
self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len), + .b => self.mir_instructions.items(.data)[inst].inst = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -5188,12 +5188,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -5323,7 +5323,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
@@ -5376,7 +5376,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro }, 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ @@ -5404,7 +5404,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5457,7 +5457,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -5554,7 +5554,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .movw, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @intCast(u16, x), + .imm16 = @as(u16, @intCast(x)), } }, }); } else { @@ -5562,7 +5562,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .mov, .data = .{ .r_op_mov = .{ .rd = reg, - .op = Instruction.Operand.imm(@truncate(u8, x), 0), + .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0), } }, }); _ = try self.addInst(.{ @@ -5570,7 +5570,7 @@ fn 
genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12), } }, }); } @@ -5585,14 +5585,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .movw, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @truncate(u16, x), + .imm16 = @as(u16, @truncate(x)), } }, }); _ = try self.addInst(.{ .tag = .movt, .data = .{ .r_imm16 = .{ .rd = reg, - .imm16 = @truncate(u16, x >> 16), + .imm16 = @as(u16, @truncate(x >> 16)), } }, }); } else { @@ -5605,7 +5605,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .tag = .mov, .data = .{ .r_op_mov = .{ .rd = reg, - .op = Instruction.Operand.imm(@truncate(u8, x), 0), + .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0), } }, }); _ = try self.addInst(.{ @@ -5613,7 +5613,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12), } }, }); _ = try self.addInst(.{ @@ -5621,7 +5621,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 16), 8), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 16)), 8), } }, }); _ = try self.addInst(.{ @@ -5629,7 +5629,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .rr_op = .{ .rd = reg, .rn = reg, - .op = Instruction.Operand.imm(@truncate(u8, x >> 24), 4), + .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 24)), 4), } }, }); } @@ -5654,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .memory => |addr| { // The value is in 
memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @intCast(addr)) }); try self.genLdrRegister(reg, reg, ty); }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, @@ -5677,7 +5677,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(off))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ @@ -5693,7 +5693,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }); } else { const offset = if (off <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, off)); + break :blk Instruction.Offset.imm(@as(u12, @intCast(off))); } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ @@ -5732,7 +5732,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5771,7 +5771,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I }, 2 => { const 
offset = if (stack_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset))); } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ @@ -5814,7 +5814,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I // sub src_reg, fp, #off try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off }); }, - .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), + .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }), .stack_argument_offset => |off| { _ = try self.addInst(.{ .tag = .ldr_ptr_stack_argument, @@ -5893,7 +5893,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); @@ -6010,7 +6010,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for arm", .{}); @@ -6058,7 +6058,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; 
const error_union_ty = self.typeOf(pl_op.operand); const mod = self.bin_file.options.module.?; - const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the @@ -6141,7 +6141,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .none => .none, .undef => .undef, .load_got, .load_direct, .load_tlv => unreachable, // TODO - .immediate => |imm| .{ .immediate = @truncate(u32, imm) }, + .immediate => |imm| .{ .immediate = @as(u32, @truncate(imm)) }, .memory => |addr| .{ .memory = addr }, }, .fail => |msg| { @@ -6198,7 +6198,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6216,7 +6216,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForward(usize, ncrn, 2); - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6245,7 +6245,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, 
@intCast(ret_ty.abiSize(mod))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6264,7 +6264,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 17415318deee..54062d00a78d 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -78,7 +78,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add => try emit.mirDataProcessing(inst), .adds => try emit.mirDataProcessing(inst), @@ -241,7 +241,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -286,7 +286,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -301,7 +301,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset + 8); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset + 8)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -320,7 +320,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset + 8); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset + 8)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { @@ -351,7 +351,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -368,13 +368,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try 
@import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; @@ -448,13 +448,13 @@ fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void { const scratch: Register = .r4; if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { - try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32))); - try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16))); + try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32)))); + try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16)))); } else { - try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8))); - try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4))); + try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@as(u8, 
@truncate(imm32)), 0))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 8)), 12))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 16)), 8))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 24)), 4))); } break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none); @@ -484,12 +484,12 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void { const cond = emit.mir.instructions.items(.cond)[inst]; const target_inst = emit.mir.instructions.items(.data)[inst].inst; - const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len + 8); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len + 8)); const branch_type = emit.branch_types.get(inst).?; switch (branch_type) { .b => switch (tag) { - .b => try emit.writeInstruction(Instruction.b(cond, @intCast(i26, offset))), + .b => try emit.writeInstruction(Instruction.b(cond, @as(i26, @intCast(offset)))), else => unreachable, }, } @@ -585,7 +585,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void { .ldrb_stack_argument, => { const offset = if (raw_offset <= math.maxInt(u12)) blk: { - break :blk Instruction.Offset.imm(@intCast(u12, raw_offset)); + break :blk Instruction.Offset.imm(@as(u12, @intCast(raw_offset))); } else return emit.fail("TODO mirLoadStack larger offsets", .{}); switch (tag) { @@ -599,7 +599,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void { .ldrsh_stack_argument, => { const offset = if (raw_offset <= math.maxInt(u8)) blk: { - break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset)); + break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(raw_offset))); } else return emit.fail("TODO mirLoadStack larger 
offsets", .{}); switch (tag) { diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig index 736d0574bbc4..e890aaf29c4a 100644 --- a/src/arch/arm/Mir.zig +++ b/src/arch/arm/Mir.zig @@ -287,7 +287,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index a4a4fe472b33..2e1e26d22045 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); + const count = @as(u8, @intCast(std.mem.alignForward(u64, total_size, arr_size) / arr_size)); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig index 1de40a7059bc..6c33f3e82a7a 100644 --- a/src/arch/arm/bits.zig +++ b/src/arch/arm/bits.zig @@ -159,7 +159,7 @@ pub const Register = enum(u5) { /// Returns the unique 4-bit ID of this register which is used in /// the machine code pub fn id(self: Register) u4 { - return @truncate(u4, @intFromEnum(self)); + return @as(u4, @truncate(@intFromEnum(self))); } pub fn dwarfLocOp(self: Register) u8 { @@ -399,8 +399,8 @@ pub const Instruction = union(enum) { pub fn toU8(self: Shift) u8 { return switch (self) { - .register => |v| @bitCast(u8, v), - .immediate => |v| @bitCast(u8, v), + .register => |v| @as(u8, @bitCast(v)), + .immediate => |v| @as(u8, @bitCast(v)), }; } @@ -425,8 +425,8 @@ pub const Instruction = union(enum) { pub fn toU12(self: Operand) u12 { return switch (self) { - .register => |v| @bitCast(u12, v), - .immediate => |v| @bitCast(u12, v), + .register => |v| @as(u12, @bitCast(v)), + 
.immediate => |v| @as(u12, @bitCast(v)), }; } @@ -463,8 +463,8 @@ pub const Instruction = union(enum) { if (x & mask == x) { break Operand{ .immediate = .{ - .imm = @intCast(u8, std.math.rotl(u32, x, 2 * i)), - .rotate = @intCast(u4, i), + .imm = @as(u8, @intCast(std.math.rotl(u32, x, 2 * i))), + .rotate = @as(u4, @intCast(i)), }, }; } @@ -522,7 +522,7 @@ pub const Instruction = union(enum) { pub fn toU12(self: Offset) u12 { return switch (self) { - .register => |v| @bitCast(u12, v), + .register => |v| @as(u12, @bitCast(v)), .immediate => |v| v, }; } @@ -604,20 +604,20 @@ pub const Instruction = union(enum) { pub fn toU32(self: Instruction) u32 { return switch (self) { - .data_processing => |v| @bitCast(u32, v), - .multiply => |v| @bitCast(u32, v), - .multiply_long => |v| @bitCast(u32, v), - .signed_multiply_halfwords => |v| @bitCast(u32, v), - .integer_saturating_arithmetic => |v| @bitCast(u32, v), - .bit_field_extract => |v| @bitCast(u32, v), - .single_data_transfer => |v| @bitCast(u32, v), - .extra_load_store => |v| @bitCast(u32, v), - .block_data_transfer => |v| @bitCast(u32, v), - .branch => |v| @bitCast(u32, v), - .branch_exchange => |v| @bitCast(u32, v), - .supervisor_call => |v| @bitCast(u32, v), + .data_processing => |v| @as(u32, @bitCast(v)), + .multiply => |v| @as(u32, @bitCast(v)), + .multiply_long => |v| @as(u32, @bitCast(v)), + .signed_multiply_halfwords => |v| @as(u32, @bitCast(v)), + .integer_saturating_arithmetic => |v| @as(u32, @bitCast(v)), + .bit_field_extract => |v| @as(u32, @bitCast(v)), + .single_data_transfer => |v| @as(u32, @bitCast(v)), + .extra_load_store => |v| @as(u32, @bitCast(v)), + .block_data_transfer => |v| @as(u32, @bitCast(v)), + .branch => |v| @as(u32, @bitCast(v)), + .branch_exchange => |v| @as(u32, @bitCast(v)), + .supervisor_call => |v| @as(u32, @bitCast(v)), .undefined_instruction => |v| v.imm32, - .breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, 
v.fixed_2_and_cond) << 20), + .breakpoint => |v| @as(u32, @intCast(v.imm4)) | (@as(u32, @intCast(v.fixed_1)) << 4) | (@as(u32, @intCast(v.imm12)) << 8) | (@as(u32, @intCast(v.fixed_2_and_cond)) << 20), }; } @@ -656,9 +656,9 @@ pub const Instruction = union(enum) { .i = 1, .opcode = if (top) 0b1010 else 0b1000, .s = 0, - .rn = @truncate(u4, imm >> 12), + .rn = @as(u4, @truncate(imm >> 12)), .rd = rd.id(), - .op2 = @truncate(u12, imm), + .op2 = @as(u12, @truncate(imm)), }, }; } @@ -760,7 +760,7 @@ pub const Instruction = union(enum) { .rn = rn.id(), .lsb = lsb, .rd = rd.id(), - .widthm1 = @intCast(u5, width - 1), + .widthm1 = @as(u5, @intCast(width - 1)), .unsigned = unsigned, .cond = @intFromEnum(cond), }, @@ -810,11 +810,11 @@ pub const Instruction = union(enum) { offset: ExtraLoadStoreOffset, ) Instruction { const imm4l: u4 = switch (offset) { - .immediate => |imm| @truncate(u4, imm), + .immediate => |imm| @as(u4, @truncate(imm)), .register => |reg| reg, }; const imm4h: u4 = switch (offset) { - .immediate => |imm| @truncate(u4, imm >> 4), + .immediate => |imm| @as(u4, @truncate(imm >> 4)), .register => 0b0000, }; @@ -853,7 +853,7 @@ pub const Instruction = union(enum) { ) Instruction { return Instruction{ .block_data_transfer = .{ - .register_list = @bitCast(u16, reg_list), + .register_list = @as(u16, @bitCast(reg_list)), .rn = rn.id(), .load_store = load_store, .write_back = @intFromBool(write_back), @@ -870,7 +870,7 @@ pub const Instruction = union(enum) { .branch = .{ .cond = @intFromEnum(cond), .link = link, - .offset = @bitCast(u24, @intCast(i24, offset >> 2)), + .offset = @as(u24, @bitCast(@as(i24, @intCast(offset >> 2)))), }, }; } @@ -904,8 +904,8 @@ pub const Instruction = union(enum) { fn breakpoint(imm: u16) Instruction { return Instruction{ .breakpoint = .{ - .imm12 = @truncate(u12, imm >> 4), - .imm4 = @truncate(u4, imm), + .imm12 = @as(u12, @truncate(imm >> 4)), + .imm4 = @as(u4, @truncate(imm)), }, }; } @@ -1319,7 +1319,7 @@ pub const Instruction = 
union(enum) { const reg = @as(Register, arg); register_list |= @as(u16, 1) << reg.id(); } - return ldm(cond, .sp, true, @bitCast(RegisterList, register_list)); + return ldm(cond, .sp, true, @as(RegisterList, @bitCast(register_list))); } } @@ -1343,7 +1343,7 @@ pub const Instruction = union(enum) { const reg = @as(Register, arg); register_list |= @as(u16, 1) << reg.id(); } - return stmdb(cond, .sp, true, @bitCast(RegisterList, register_list)); + return stmdb(cond, .sp, true, @as(RegisterList, @bitCast(register_list))); } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index cba1de92c1ed..d6bb9f82005a 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -323,7 +323,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -336,11 +336,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } @@ -752,15 +752,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const 
dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1709,7 +1709,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = self.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); @@ -1747,7 +1747,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, @@ -2139,12 +2139,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + 
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -2289,20 +2289,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { - const x = @bitCast(i64, unsigned_x); + const x = @as(i64, @bitCast(unsigned_x)); if (math.minInt(i12) <= x and x <= math.maxInt(i12)) { _ = try self.addInst(.{ .tag = .addi, .data = .{ .i_type = .{ .rd = reg, .rs1 = .zero, - .imm12 = @intCast(i12, x), + .imm12 = @as(i12, @intCast(x)), } }, }); } else if (math.minInt(i32) <= x and x <= math.maxInt(i32)) { - const lo12 = @truncate(i12, x); + const lo12 = @as(i12, @truncate(x)); const carry: i32 = if (lo12 < 0) 1 else 0; - const hi20 = @truncate(i20, (x >> 12) +% carry); + const hi20 = @as(i20, @truncate((x >> 12) +% carry)); // TODO: add test case for 32-bit immediate _ = try self.addInst(.{ @@ -2501,7 +2501,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if 
(self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for riscv64", .{}); @@ -2653,7 +2653,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; for (fn_info.param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2690,7 +2690,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 3b330cbd3f05..20f2c40ba444 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -39,7 +39,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .add => try emit.mirRType(inst), .sub => try emit.mirRType(inst), @@ -85,7 +85,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line)); const delta_pc: usize = self.code.items.len - self.prev_di_pc; switch (self.debug_output) { .dwarf => |dw| { @@ -102,13 +102,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta - try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant); if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because adding the quant does it for us } else unreachable; diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 8905b24c3cc7..da62a6894193 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -135,7 +135,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, 
mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig index 5db3bf4f0520..2239bd49f8c4 100644 --- a/src/arch/riscv64/bits.zig +++ b/src/arch/riscv64/bits.zig @@ -56,12 +56,12 @@ pub const Instruction = union(enum) { // TODO: once packed structs work we can remove this monstrosity. pub fn toU32(self: Instruction) u32 { return switch (self) { - .R => |v| @bitCast(u32, v), - .I => |v| @bitCast(u32, v), - .S => |v| @bitCast(u32, v), - .B => |v| @intCast(u32, v.opcode) + (@intCast(u32, v.imm11) << 7) + (@intCast(u32, v.imm1_4) << 8) + (@intCast(u32, v.funct3) << 12) + (@intCast(u32, v.rs1) << 15) + (@intCast(u32, v.rs2) << 20) + (@intCast(u32, v.imm5_10) << 25) + (@intCast(u32, v.imm12) << 31), - .U => |v| @bitCast(u32, v), - .J => |v| @bitCast(u32, v), + .R => |v| @as(u32, @bitCast(v)), + .I => |v| @as(u32, @bitCast(v)), + .S => |v| @as(u32, @bitCast(v)), + .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31), + .U => |v| @as(u32, @bitCast(v)), + .J => |v| @as(u32, @bitCast(v)), }; } @@ -80,7 +80,7 @@ pub const Instruction = union(enum) { // RISC-V is all signed all the time -- convert immediates to unsigned for processing fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction { - const umm = @bitCast(u12, imm); + const umm = @as(u12, @bitCast(imm)); return Instruction{ .I = .{ @@ -94,7 +94,7 @@ pub const Instruction = union(enum) { } fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction { - const umm = @bitCast(u12, imm); + const umm = @as(u12, @bitCast(imm)); return Instruction{ .S = .{ @@ -102,8 +102,8 @@ pub const Instruction = union(enum) { .funct3 = fn3, 
.rs1 = r1.id(), .rs2 = r2.id(), - .imm0_4 = @truncate(u5, umm), - .imm5_11 = @truncate(u7, umm >> 5), + .imm0_4 = @as(u5, @truncate(umm)), + .imm5_11 = @as(u7, @truncate(umm >> 5)), }, }; } @@ -111,7 +111,7 @@ pub const Instruction = union(enum) { // Use significance value rather than bit value, same for J-type // -- less burden on callsite, bonus semantic checking fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction { - const umm = @bitCast(u13, imm); + const umm = @as(u13, @bitCast(imm)); assert(umm % 2 == 0); // misaligned branch target return Instruction{ @@ -120,17 +120,17 @@ pub const Instruction = union(enum) { .funct3 = fn3, .rs1 = r1.id(), .rs2 = r2.id(), - .imm1_4 = @truncate(u4, umm >> 1), - .imm5_10 = @truncate(u6, umm >> 5), - .imm11 = @truncate(u1, umm >> 11), - .imm12 = @truncate(u1, umm >> 12), + .imm1_4 = @as(u4, @truncate(umm >> 1)), + .imm5_10 = @as(u6, @truncate(umm >> 5)), + .imm11 = @as(u1, @truncate(umm >> 11)), + .imm12 = @as(u1, @truncate(umm >> 12)), }, }; } // We have to extract the 20 bits anyway -- let's not make it more painful fn uType(op: u7, rd: Register, imm: i20) Instruction { - const umm = @bitCast(u20, imm); + const umm = @as(u20, @bitCast(imm)); return Instruction{ .U = .{ @@ -142,17 +142,17 @@ pub const Instruction = union(enum) { } fn jType(op: u7, rd: Register, imm: i21) Instruction { - const umm = @bitCast(u21, imm); + const umm = @as(u21, @bitCast(imm)); assert(umm % 2 == 0); // misaligned jump target return Instruction{ .J = .{ .opcode = op, .rd = rd.id(), - .imm1_10 = @truncate(u10, umm >> 1), - .imm11 = @truncate(u1, umm >> 11), - .imm12_19 = @truncate(u8, umm >> 12), - .imm20 = @truncate(u1, umm >> 20), + .imm1_10 = @as(u10, @truncate(umm >> 1)), + .imm11 = @as(u1, @truncate(umm >> 11)), + .imm12_19 = @as(u8, @truncate(umm >> 12)), + .imm20 = @as(u1, @truncate(umm >> 20)), }, }; } @@ -258,7 +258,7 @@ pub const Instruction = union(enum) { } pub fn sltiu(rd: Register, r1: Register, imm: u12) 
Instruction { - return iType(0b0010011, 0b011, rd, r1, @bitCast(i12, imm)); + return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm))); } // Arithmetic/Logical, Register-Immediate (32-bit) @@ -407,7 +407,7 @@ pub const Register = enum(u6) { /// Returns the unique 4-bit ID of this register which is used in /// the machine code pub fn id(self: Register) u5 { - return @truncate(u5, @intFromEnum(self)); + return @as(u5, @truncate(@intFromEnum(self))); } pub fn dwarfLocOp(reg: Register) u8 { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f210f8e14461..9975cda5cbce 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -415,7 +415,7 @@ fn gen(self: *Self) !void { .branch_predict_int = .{ .ccr = .xcc, .cond = .al, - .inst = @intCast(u32, self.mir_instructions.len), + .inst = @as(u32, @intCast(self.mir_instructions.len)), }, }, }); @@ -840,7 +840,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = res: { if (self.liveness.isUnused(inst)) break :res MCValue.dead; return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch}); @@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(mod); - const array_len = @intCast(u32, array_ty.arrayLen(mod)); + const array_len = @as(u32, @intCast(array_ty.arrayLen(mod))); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -893,11 +893,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl 
= self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = (extra.data.flags & 0x80000000) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.inputs_len])); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); @@ -1237,13 +1237,13 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (operand) { .immediate => |imm| { const swapped = switch (int_info.bits) { - 16 => @byteSwap(@intCast(u16, imm)), - 24 => @byteSwap(@intCast(u24, imm)), - 32 => @byteSwap(@intCast(u32, imm)), - 40 => @byteSwap(@intCast(u40, imm)), - 48 => @byteSwap(@intCast(u48, imm)), - 56 => @byteSwap(@intCast(u56, imm)), - 64 => @byteSwap(@intCast(u64, imm)), + 16 => @byteSwap(@as(u16, @intCast(imm))), + 24 => @byteSwap(@as(u24, @intCast(imm))), + 32 => @byteSwap(@as(u32, @intCast(imm))), + 40 => @byteSwap(@as(u40, @intCast(imm))), + 48 => @byteSwap(@as(u48, @intCast(imm))), + 56 => @byteSwap(@as(u56, @intCast(imm))), + 64 => @byteSwap(@as(u64, @intCast(imm))), else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}), }; break :result .{ .immediate = swapped }; @@ -1295,7 +1295,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, 
pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len])); const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -1348,7 +1348,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); - break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); + break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); } else unreachable; try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); @@ -1515,7 +1515,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.operandDies(inst, 0)) { const op_int = @intFromEnum(pl_op.operand); if (op_int >= Air.ref_start_index) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } } @@ -1851,7 +1851,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end .. 
loop.end + loop.data.body_len]; - const start = @intCast(u32, self.mir_instructions.len); + const start = @as(u32, @intCast(self.mir_instructions.len)); try self.genBody(body); try self.jump(start); @@ -2574,7 +2574,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .dead, .unreach => unreachable, @@ -2772,7 +2772,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = self.gpa; try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); return result_index; } @@ -3207,7 +3207,7 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) }, }, }, .sll, @@ -3218,7 +3218,7 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u5, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u5, @intCast(rhs.immediate)) }, }, }, .sllx, @@ -3229,14 +3229,14 @@ fn binOpImmediate( .is_imm = true, .rd = dest_reg, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(rhs.immediate)) }, }, }, .cmp => .{ .arithmetic_2op = .{ .is_imm = true, .rs1 = lhs_reg, - .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) }, + .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) }, }, }, else => unreachable, @@ -3535,7 +3535,7 @@ fn 
errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3565,15 +3565,15 @@ fn finishAirBookkeeping(self: *Self) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; const op_int = @intFromEnum(op); if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); + const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index)); self.processDeath(op_index); } - const is_used = @truncate(u1, tomb_bits) == 0; + const is_used = @as(u1, @truncate(tomb_bits)) == 0; if (is_used) { log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -3663,7 +3663,7 @@ fn genInlineMemcpy( .data = .{ .branch_predict_reg = .{ .cond = .ne_zero, .rs1 = len, - .inst = @intCast(u32, self.mir_instructions.len - 2), + .inst = @as(u32, @intCast(self.mir_instructions.len - 2)), } }, }); @@ -3838,7 +3838,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .arithmetic_2op = .{ .is_imm = true, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) }, }, }, }); @@ -3848,7 +3848,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = .{ .sethi = .{ .rd = reg, - .imm = @truncate(u22, x >> 10), + .imm = @as(u22, @truncate(x >> 10)), }, }, }); @@ 
-3860,12 +3860,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .is_imm = true, .rd = reg, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u10, x) }, + .rs2_or_imm = .{ .imm = @as(u10, @truncate(x)) }, }, }, }); } else if (x <= math.maxInt(u44)) { - try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 12)) }); _ = try self.addInst(.{ .tag = .sllx, @@ -3886,7 +3886,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .is_imm = true, .rd = reg, .rs1 = reg, - .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) }, }, }, }); @@ -3894,8 +3894,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // Need to allocate a temporary register to load 64-bit immediates. const tmp_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) }); - try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) }); + try self.genSetReg(ty, tmp_reg, .{ .immediate = @as(u32, @truncate(x)) }); + try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 32)) }); _ = try self.addInst(.{ .tag = .sllx, @@ -3994,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4412,8 +4412,8 @@ fn parseRegName(name: []const u8) ?Register { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .bpcc => 
self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), - .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), + .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)), else => unreachable, } } @@ -4490,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (fn_info.param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4522,7 +4522,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. 
if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4721,7 +4721,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4816,7 +4816,7 @@ fn truncRegister( .is_imm = true, .rd = dest_reg, .rs1 = operand_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, 64 - int_bits) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(64 - int_bits)) }, }, }, }); @@ -4830,7 +4830,7 @@ fn truncRegister( .is_imm = true, .rd = dest_reg, .rs1 = dest_reg, - .rs2_or_imm = .{ .imm = @intCast(u6, int_bits) }, + .rs2_or_imm = .{ .imm = @as(u6, @intCast(int_bits)) }, }, }, }); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 7d1610534879..2c39c702694b 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -70,7 +70,7 @@ pub fn emitMir( // Emit machine code for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { .dbg_line => try emit.mirDbgLine(inst), .dbg_prologue_end => try emit.mirDebugPrologueEnd(), @@ -294,7 +294,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { .bpcc => switch (tag) { .bpcc => { const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int; - const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_int.inst).?) 
- @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len)); log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset }); try emit.writeInstruction( @@ -303,7 +303,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { branch_predict_int.annul, branch_predict_int.pt, branch_predict_int.ccr, - @intCast(i21, offset), + @as(i21, @intCast(offset)), ), ); }, @@ -312,7 +312,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { .bpr => switch (tag) { .bpr => { const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg; - const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_reg.inst).?) - @intCast(i64, emit.code.items.len); + const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len)); log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset }); try emit.writeInstruction( @@ -321,7 +321,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { branch_predict_reg.annul, branch_predict_reg.pt, branch_predict_reg.rs1, - @intCast(i18, offset), + @as(i18, @intCast(offset)), ), ); }, @@ -437,9 +437,9 @@ fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void { if (data.is_imm) { const imm = data.rs2_or_imm.imm; switch (tag) { - .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @truncate(u5, imm), rd)), - .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @truncate(u5, imm), rd)), - .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @truncate(u5, imm), rd)), + .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @as(u5, @truncate(imm)), rd)), + .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @as(u5, @truncate(imm)), rd)), + .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @as(u5, @truncate(imm)), rd)), .sllx => try 
emit.writeInstruction(Instruction.sllx(u6, rs1, imm, rd)), .srlx => try emit.writeInstruction(Instruction.srlx(u6, rs1, imm, rd)), .srax => try emit.writeInstruction(Instruction.srax(u6, rs1, imm, rd)), @@ -495,7 +495,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index { } fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { - const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; switch (emit.debug_output) { .dwarf => |dbg_out| { @@ -547,7 +547,7 @@ fn lowerBranches(emit: *Emit) !void { // TODO optimization opportunity: do this in codegen while // generating MIR for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); if (isBranch(tag)) { const target_inst = emit.branchTarget(inst); @@ -592,7 +592,7 @@ fn lowerBranches(emit: *Emit) !void { var current_code_offset: usize = 0; for (mir_tags, 0..) 
|tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); // If this instruction contained in the code offset // mapping (when it is a target of a branch or if it is a @@ -607,7 +607,7 @@ fn lowerBranches(emit: *Emit) !void { const target_inst = emit.branchTarget(inst); if (target_inst < inst) { const target_offset = emit.code_offset_mapping.get(target_inst).?; - const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset); + const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset)); const branch_type = emit.branch_types.getPtr(inst).?; const optimal_branch_type = try emit.optimalBranchType(tag, offset); if (branch_type.* != optimal_branch_type) { @@ -626,7 +626,7 @@ fn lowerBranches(emit: *Emit) !void { for (origin_list.items) |forward_branch_inst| { const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst]; const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?; - const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset); + const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset)); const branch_type = emit.branch_types.getPtr(forward_branch_inst).?; const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset); if (branch_type.* != optimal_branch_type) { diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig index f9a405670503..31ea4e23c82a 100644 --- a/src/arch/sparc64/Mir.zig +++ b/src/arch/sparc64/Mir.zig @@ -379,7 +379,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index 
81656b422b6c..04da91ca74a1 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -16,7 +16,7 @@ pub const Register = enum(u6) { // zig fmt: on pub fn id(self: Register) u5 { - return @truncate(u5, @intFromEnum(self)); + return @as(u5, @truncate(@intFromEnum(self))); } pub fn enc(self: Register) u5 { @@ -96,9 +96,9 @@ pub const FloatingPointRegister = enum(u7) { pub fn id(self: FloatingPointRegister) u6 { return switch (self.size()) { - 32 => @truncate(u6, @intFromEnum(self)), - 64 => @truncate(u6, (@intFromEnum(self) - 32) * 2), - 128 => @truncate(u6, (@intFromEnum(self) - 64) * 4), + 32 => @as(u6, @truncate(@intFromEnum(self))), + 64 => @as(u6, @truncate((@intFromEnum(self) - 32) * 2)), + 128 => @as(u6, @truncate((@intFromEnum(self) - 64) * 4)), else => unreachable, }; } @@ -109,7 +109,7 @@ pub const FloatingPointRegister = enum(u7) { // (See section 5.1.4.1 of SPARCv9 ISA specification) const reg_id = self.id(); - return @truncate(u5, reg_id | (reg_id >> 5)); + return @as(u5, @truncate(reg_id | (reg_id >> 5))); } /// Returns the bit-width of the register. @@ -752,13 +752,13 @@ pub const Instruction = union(enum) { // See section 6.2 of the SPARCv9 ISA manual. fn format1(disp: i32) Instruction { - const udisp = @bitCast(u32, disp); + const udisp = @as(u32, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. - const udisp_truncated = @truncate(u30, udisp >> 2); + const udisp_truncated = @as(u30, @truncate(udisp >> 2)); return Instruction{ .format_1 = .{ .disp30 = udisp_truncated, @@ -777,13 +777,13 @@ pub const Instruction = union(enum) { } fn format2b(op2: u3, cond: Condition, annul: bool, disp: i24) Instruction { - const udisp = @bitCast(u24, disp); + const udisp = @as(u24, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. 
- const udisp_truncated = @truncate(u22, udisp >> 2); + const udisp_truncated = @as(u22, @truncate(udisp >> 2)); return Instruction{ .format_2b = .{ .a = @intFromBool(annul), @@ -795,16 +795,16 @@ pub const Instruction = union(enum) { } fn format2c(op2: u3, cond: Condition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction { - const udisp = @bitCast(u21, disp); + const udisp = @as(u21, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero. - const udisp_truncated = @truncate(u19, udisp >> 2); + const udisp_truncated = @as(u19, @truncate(udisp >> 2)); - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_2c = .{ .a = @intFromBool(annul), @@ -819,16 +819,16 @@ pub const Instruction = union(enum) { } fn format2d(op2: u3, rcond: RCondition, annul: bool, pt: bool, rs1: Register, disp: i18) Instruction { - const udisp = @bitCast(u18, disp); + const udisp = @as(u18, @bitCast(disp)); // In SPARC, branch target needs to be aligned to 4 bytes. assert(udisp % 4 == 0); // Discard the last two bits since those are implicitly zero, // and split it into low and high parts. 
- const udisp_truncated = @truncate(u16, udisp >> 2); - const udisp_hi = @truncate(u2, (udisp_truncated & 0b1100_0000_0000_0000) >> 14); - const udisp_lo = @truncate(u14, udisp_truncated & 0b0011_1111_1111_1111); + const udisp_truncated = @as(u16, @truncate(udisp >> 2)); + const udisp_hi = @as(u2, @truncate((udisp_truncated & 0b1100_0000_0000_0000) >> 14)); + const udisp_lo = @as(u14, @truncate(udisp_truncated & 0b0011_1111_1111_1111)); return Instruction{ .format_2d = .{ .a = @intFromBool(annul), @@ -860,7 +860,7 @@ pub const Instruction = union(enum) { .rd = rd.enc(), .op3 = op3, .rs1 = rs1.enc(), - .simm13 = @bitCast(u13, imm), + .simm13 = @as(u13, @bitCast(imm)), }, }; } @@ -880,7 +880,7 @@ pub const Instruction = union(enum) { .op = op, .op3 = op3, .rs1 = rs1.enc(), - .simm13 = @bitCast(u13, imm), + .simm13 = @as(u13, @bitCast(imm)), }, }; } @@ -904,7 +904,7 @@ pub const Instruction = union(enum) { .op3 = op3, .rs1 = rs1.enc(), .rcond = @intFromEnum(rcond), - .simm10 = @bitCast(u10, imm), + .simm10 = @as(u10, @bitCast(imm)), }, }; } @@ -922,8 +922,8 @@ pub const Instruction = union(enum) { fn format3h(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction { return Instruction{ .format_3h = .{ - .cmask = @bitCast(u3, cmask), - .mmask = @bitCast(u4, mmask), + .cmask = @as(u3, @bitCast(cmask)), + .mmask = @as(u4, @bitCast(mmask)), }, }; } @@ -995,8 +995,8 @@ pub const Instruction = union(enum) { }; } fn format3o(op: u2, op3: u6, opf: u9, ccr: CCR, rs1: Register, rs2: Register) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_3o = .{ .op = op, @@ -1051,8 +1051,8 @@ pub const Instruction = union(enum) { } fn format4a(op3: u6, ccr: CCR, rs1: Register, rs2: Register, rd: Register) Instruction { - const ccr_cc1 = @truncate(u1, 
@intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4a = .{ .rd = rd.enc(), @@ -1066,8 +1066,8 @@ pub const Instruction = union(enum) { } fn format4b(op3: u6, ccr: CCR, rs1: Register, imm: i11, rd: Register) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4b = .{ .rd = rd.enc(), @@ -1075,15 +1075,15 @@ pub const Instruction = union(enum) { .rs1 = rs1.enc(), .cc1 = ccr_cc1, .cc0 = ccr_cc0, - .simm11 = @bitCast(u11, imm), + .simm11 = @as(u11, @bitCast(imm)), }, }; } fn format4c(op3: u6, cond: Condition, ccr: CCR, rs2: Register, rd: Register) Instruction { - const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2); - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4c = .{ .rd = rd.enc(), @@ -1098,9 +1098,9 @@ pub const Instruction = union(enum) { } fn format4d(op3: u6, cond: Condition, ccr: CCR, imm: i11, rd: Register) Instruction { - const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2); - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4d = .{ .rd = rd.enc(), @@ -1109,14 +1109,14 @@ pub const Instruction = union(enum) { .cond = cond.enc(), .cc1 = ccr_cc1, 
.cc0 = ccr_cc0, - .simm11 = @bitCast(u11, imm), + .simm11 = @as(u11, @bitCast(imm)), }, }; } fn format4e(op3: u6, ccr: CCR, rs1: Register, rd: Register, sw_trap: u7) Instruction { - const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1); - const ccr_cc0 = @truncate(u1, @intFromEnum(ccr)); + const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1)); + const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr))); return Instruction{ .format_4e = .{ .rd = rd.enc(), @@ -1468,8 +1468,8 @@ pub const Instruction = union(enum) { pub fn trap(comptime s2: type, cond: ICondition, ccr: CCR, rs1: Register, rs2: s2) Instruction { // Tcc instructions abuse the rd field to store the conditionals. return switch (s2) { - Register => format4a(0b11_1010, ccr, rs1, rs2, @enumFromInt(Register, @intFromEnum(cond))), - u7 => format4e(0b11_1010, ccr, rs1, @enumFromInt(Register, @intFromEnum(cond)), rs2), + Register => format4a(0b11_1010, ccr, rs1, rs2, @as(Register, @enumFromInt(@intFromEnum(cond)))), + u7 => format4e(0b11_1010, ccr, rs1, @as(Register, @enumFromInt(@intFromEnum(cond))), rs2), else => unreachable, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index f9e5eed626ed..3a50fc982491 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -120,7 +120,7 @@ const WValue = union(enum) { if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals. 
const index = local_value - reserved; - const valtype = @enumFromInt(wasm.Valtype, gen.locals.items[index]); + const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index])); switch (valtype) { .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return, @@ -817,7 +817,7 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c assert(operands.len <= Liveness.bpi - 1); var tomb_bits = func.liveness.getTombBits(inst); for (operands) |operand| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; processDeath(func, operand); @@ -910,7 +910,7 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void { } fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void { - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.append(func.gpa, @intFromEnum(opcode)); try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } }); } @@ -934,11 +934,11 @@ fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void { /// Accepts the index into the list of 128bit-immediates fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void { const simd_values = func.simd_immediates.items[index]; - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // tag + 128bit value try func.mir_extra.ensureUnusedCapacity(func.gpa, 5); func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const)); - func.mir_extra.appendSliceAssumeCapacity(@alignCast(4, mem.bytesAsSlice(u32, &simd_values))); + func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values))); try func.addInst(.{ 
.tag = .simd_prefix, .data = .{ .payload = extra_index } }); } @@ -979,7 +979,7 @@ fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { /// Returns the index into `mir_extra` fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, func.mir_extra.items.len); + const result = @as(u32, @intCast(func.mir_extra.items.len)); inline for (fields) |field| { func.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), @@ -1020,7 +1020,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { }, .Union => switch (ty.containerLayout(mod)) { .Packed => { - const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory"); return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, @@ -1050,7 +1050,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void { .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?) .none, .stack => {}, // no-op .local => |idx| try func.addLabel(.local_get, idx.value), - .imm32 => |val| try func.addImm32(@bitCast(i32, val)), + .imm32 => |val| try func.addImm32(@as(i32, @bitCast(val))), .imm64 => |val| try func.addImm64(val), .imm128 => |val| try func.addImm128(val), .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }), @@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // In case we have a return value, but the last instruction is a noreturn (such as a while loop) // we emit an unreachable instruction to tell the stack validator that part will never be reached. 
if (func_type.returns.len != 0 and func.air.instructions.len > 0) { - const inst = @intCast(u32, func.air.instructions.len - 1); + const inst = @as(u32, @intCast(func.air.instructions.len - 1)); const last_inst_ty = func.typeOfIndex(inst); if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); @@ -1287,11 +1287,11 @@ fn genFunc(func: *CodeGen) InnerError!void { try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment); - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); + try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } }); // substract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); // Get negative stack aligment - try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, func.stack_alignment) * -1 } }); + try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } }); // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } }); // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets @@ -1432,7 +1432,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: if (value != .imm32 and value != .imm64) { const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, abi_size), + .width = @as(u8, @intCast(abi_size)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, .valtype1 = typeToValtype(scalar_type, mod), }); @@ -1468,7 +1468,7 @@ fn lowerToStack(func: *CodeGen, value: WValue) !void { if (offset.value > 0) { 
switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, offset.value)); + try func.addImm32(@as(i32, @bitCast(offset.value))); try func.addTag(.i32_add); }, .wasm64 => { @@ -1815,7 +1815,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en if (offset + ptr_value.offset() > 0) { switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset()))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(offset + ptr_value.offset()))))); try func.addTag(.i32_add); }, .wasm64 => { @@ -2111,7 +2111,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, .valtype1 = typeToValtype(scalar_type, mod), }); @@ -2180,7 +2180,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{}); const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len])); const ty = func.typeOf(pl_op.operand); const mod = func.bin_file.base.options.module.?; @@ -2319,15 +2319,15 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); - mask <<= @intCast(u6, ptr_info.packed_offset.bit_offset); + var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, 
@intCast(ty.bitSize(mod)))) - 1)); + mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset)); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.packed_offset.host_size <= 4) WValue{ .imm32 = ptr_info.packed_offset.bit_offset } else WValue{ .imm64 = ptr_info.packed_offset.bit_offset }; const mask_val = if (ptr_info.packed_offset.host_size <= 4) - WValue{ .imm32 = @truncate(u32, mask) } + WValue{ .imm32 = @as(u32, @truncate(mask)) } else WValue{ .imm64 = mask }; @@ -2357,7 +2357,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { @@ -2372,23 +2372,23 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Struct, .Array, .Union => if (isByRef(ty, mod)) { - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .unrolled => { - const len = @intCast(u32, abi_size); + const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .direct => { try func.emitWValue(lhs); try func.lowerToStack(rhs); // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), @@ -2423,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 
+ lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(mod))) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, - .width = @intCast(u8, abi_size * 8), + .width = @as(u8, @intCast(abi_size * 8)), .op = .store, }); @@ -2501,7 +2501,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), @@ -2512,7 +2512,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(mod)); + const abi_size = @as(u8, @intCast(ty.abiSize(mod))); const opcode = buildOpcode(.{ .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, @@ -2589,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { + const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; + const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2868,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { + const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; + const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2902,7 +2902,7 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; assert(ty.abiSize(mod) <= 16); - const bitsize = @intCast(u16, ty.bitSize(mod)); + const bitsize = @as(u16, @intCast(ty.bitSize(mod))); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2916,7 +2916,7 @@ fn 
wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { const result_ptr = try func.allocStack(ty); try func.emitWValue(result_ptr); try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset()); - const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1; + const result = (@as(u64, 1) << @as(u6, @intCast(64 - (wasm_bits - bitsize)))) - 1; try func.emitWValue(result_ptr); _ = try func.load(operand, Type.u64, 0); try func.addImm64(result); @@ -2925,10 +2925,10 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return result_ptr; } - const result = (@as(u64, 1) << @intCast(u6, bitsize)) - 1; + const result = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1; try func.emitWValue(operand); if (bitsize <= 32) { - try func.addImm32(@bitCast(i32, @intCast(u32, result))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(result))))); try func.addTag(.i32_and); } else if (bitsize <= 64) { try func.addImm64(result); @@ -2957,15 +2957,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const index = elem.index; const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); const elem_offset = index * elem_type.abiSize(mod); - return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset)); + return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset))); }, .field => |field| { const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), - else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), + .Packed => parent_ty.packedStructFieldByteOffset(@as(usize, @intCast(field.index)), mod), + else => parent_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod), }, .Union 
=> switch (parent_ty.containerLayout(mod)) { .Packed => 0, @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); + break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align))); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { @@ -2988,7 +2988,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset)); + return func.lowerParentPtr(field.base.toValue(), @as(u32, @intCast(offset + field_offset))); }, } } @@ -3045,11 +3045,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( comptime assert(@typeInfo(T).Int.signedness == .signed); assert(bits <= 64); const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - if (value >= 0) return @bitCast(WantedT, value); - const max_value = @intCast(u64, (@as(u65, 1) << bits) - 1); - const flipped = @intCast(T, (~-@as(i65, value)) + 1); - const result = @bitCast(WantedT, flipped) & max_value; - return @intCast(WantedT, result); + if (value >= 0) return @as(WantedT, @bitCast(value)); + const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1)); + const flipped = @as(T, @intCast((~-@as(i65, value)) + 1)); + const result = @as(WantedT, @bitCast(flipped)) & max_value; + return @as(WantedT, @intCast(result)); } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { @@ -3150,18 +3150,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( + 
0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement( val.toSignedInt(mod), - @intCast(u6, int_info.bits), - )) }, + @as(u6, @intCast(int_info.bits)), + ))) }, 33...64 => return WValue{ .imm64 = toTwosComplement( val.toSignedInt(mod), - @intCast(u7, int_info.bits), + @as(u7, @intCast(int_info.bits)), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) }, 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, @@ -3198,7 +3198,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); }, .float => |float| switch (float.storage) { - .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f16 => |f16_val| return WValue{ .imm32 = @as(u16, @bitCast(f16_val)) }, .f32 => |f32_val| return WValue{ .float32 = f32_val }, .f64 => |f64_val| return WValue{ .float64 = f64_val }, else => unreachable, @@ -3254,7 +3254,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { /// Stores the value as a 128bit-immediate value by storing it inside /// the list and returning the index into this list as `WValue`. 
fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { - const index = @intCast(u32, func.simd_immediates.items.len); + const index = @as(u32, @intCast(func.simd_immediates.items.len)); try func.simd_immediates.append(func.gpa, value); return WValue{ .imm128 = index }; } @@ -3270,8 +3270,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { }, .Float => switch (ty.floatBits(func.target)) { 16 => return WValue{ .imm32 = 0xaaaaaaaa }, - 32 => return WValue{ .float32 = @bitCast(f32, @as(u32, 0xaaaaaaaa)) }, - 64 => return WValue{ .float64 = @bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa)) }, + 32 => return WValue{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) }, + 64 => return WValue{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) }, else => unreachable, }, .Pointer => switch (func.arch()) { @@ -3312,13 +3312,13 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), .int => |int| intStorageAsI32(int.storage, mod), .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), - .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), + .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), else => unreachable, }, } return switch (ty.zigTypeTag(mod)) { - .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), + .ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))), else => unreachable, // Programmer called this function for an illegal type }; } @@ -3329,11 +3329,11 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { return switch (storage) { - .i64 => |x| @intCast(i32, x), - .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .i64 => |x| @as(i32, @intCast(x)), + .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), .big_int => unreachable, - .lazy_align => 
|ty| @bitCast(i32, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), + .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))), }; } @@ -3421,7 +3421,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.branches.ensureUnusedCapacity(func.gpa, 2); { func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len)); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len))); defer { var else_stack = func.branches.pop(); else_stack.deinit(func.gpa); @@ -3433,7 +3433,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Outer block that matches the condition { func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len)); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len))); defer { var then_stack = func.branches.pop(); then_stack.deinit(func.gpa); @@ -3715,7 +3715,7 @@ fn structFieldPtr( } switch (struct_ptr) { .stack_offset => |stack_offset| { - return WValue{ .stack_offset = .{ .value = stack_offset.value + @intCast(u32, offset), .references = 1 } }; + return WValue{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } }; }, else => return func.buildPointerOffset(struct_ptr, offset, .new), } @@ -3755,7 +3755,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.binOp(operand, const_wvalue, backing_ty, .shr); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, 
@as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); @@ -3764,7 +3764,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // we can simply reuse the operand. break :result func.reuseOperand(struct_field.struct_operand, operand); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3783,14 +3783,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod)))); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3847,7 +3847,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var highest_maybe: ?i32 = null; while (case_i < switch_br.data.cases_len) : 
(case_i += 1) { const case = func.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, func.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[case.end..][0..case.data.items_len])); const case_body = func.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; const values = try func.gpa.alloc(CaseValue, items.len); @@ -3904,7 +3904,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // Account for default branch so always add '1' - const depth = @intCast(u32, highest - lowest + @intFromBool(has_else_body)) + 1; + const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1; const jump_table: Mir.JumpTable = .{ .length = depth }; const table_extra_index = try func.addExtra(jump_table); try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); @@ -3915,7 +3915,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const idx = blk: { for (case_list.items, 0..) 
|case, idx| { for (case.values) |case_value| { - if (case_value.integer == value) break :blk @intCast(u32, idx); + if (case_value.integer == value) break :blk @as(u32, @intCast(idx)); } } // error sets are almost always sparse so we use the default case @@ -4018,7 +4018,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro try func.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))), .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -4051,7 +4051,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -4080,7 +4080,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); + const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); @@ -4100,13 +4100,13 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); try 
func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); + const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4128,11 +4128,11 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); + try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); + const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod))); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4154,8 +4154,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; - const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?; + const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4170,8 +4170,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) 
InnerError!void { /// NOTE: May leave the result on the top of the stack. fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - const given_bitsize = @intCast(u16, given.bitSize(mod)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); + const given_bitsize = @as(u16, @intCast(given.bitSize(mod))); + const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod))); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4396,7 +4396,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4426,7 +4426,7 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4466,13 +4466,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// NOTE: Resulting value is left on the stack. 
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - const given_bits = @intCast(u16, given_ty.bitSize(mod)); + const given_bits = @as(u16, @intCast(given_ty.bitSize(mod))); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); + const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod))); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4505,7 +4505,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; + const len = WValue{ .imm32 = @as(u32, @intCast(array_ty.arrayLen(mod))) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); @@ -4545,7 +4545,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into slice try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4584,7 +4584,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // calculate index into ptr try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); @@ -4612,7 +4612,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { try func.lowerToStack(ptr); try func.emitWValue(offset); - try 
func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod)))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(pointee_ty.abiSize(mod)))))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4635,7 +4635,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const value = try func.resolveInst(bin_op.rhs); const len = switch (ptr_ty.ptrSize(mod)) { .Slice => try func.sliceLen(ptr), - .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }), + .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }), .C, .Many => unreachable, }; @@ -4656,7 +4656,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { const mod = func.bin_file.base.options.module.?; - const abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. 
@@ -4756,7 +4756,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (isByRef(array_ty, mod)) { try func.lowerToStack(array); try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); } else { @@ -4772,11 +4772,11 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }; - var operands = [_]u32{ std.wasm.simdOpcode(opcode), @intCast(u8, lane) }; + var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) }; try func.emitWValue(array); - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.appendSlice(func.gpa, &operands); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); @@ -4789,7 +4789,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Is a non-unrolled vector (v128) try func.lowerToStack(stack_vec); try func.emitWValue(index); - try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); }, @@ -4886,7 +4886,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocLocal(ty); try func.emitWValue(operand); // TODO: Add helper functions for simd opcodes - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, @@ -4907,7 +4907,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; const result = try func.allocLocal(ty); try func.emitWValue(operand); - const extra_index = @intCast(u32, 
func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.append(func.gpa, opcode); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); @@ -4917,13 +4917,13 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } const elem_size = elem_ty.bitSize(mod); - const vector_len = @intCast(usize, ty.vectorLen(mod)); + const vector_len = @as(usize, @intCast(ty.vectorLen(mod))); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod))); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -4966,11 +4966,11 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(result); const loaded = if (value >= 0) - try func.load(a, child_ty, @intCast(u32, @intCast(i64, elem_size) * value)) + try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value))) else - try func.load(b, child_ty, @intCast(u32, @intCast(i64, elem_size) * ~value)); + try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value))); - try func.store(.stack, loaded, child_ty, result.stack_offset.value + @intCast(u32, elem_size) * @intCast(u32, index)); + try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index))); } return func.finishAir(inst, result, &.{ extra.a, extra.b }); @@ -4980,22 +4980,22 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } ++ [1]u32{undefined} ** 4; var lanes = std.mem.asBytes(operands[1..]); - for (0..@intCast(usize, mask_len)) |index| { + for 
(0..@as(usize, @intCast(mask_len))) |index| { const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); const base_index = if (mask_elem >= 0) - @intCast(u8, @intCast(i64, elem_size) * mask_elem) + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem)) else - 16 + @intCast(u8, @intCast(i64, elem_size) * ~mask_elem); + 16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem)); - for (0..@intCast(usize, elem_size)) |byte_offset| { - lanes[index * @intCast(usize, elem_size) + byte_offset] = base_index + @intCast(u8, byte_offset); + for (0..@as(usize, @intCast(elem_size))) |byte_offset| { + lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset)); } } try func.emitWValue(a); try func.emitWValue(b); - const extra_index = @intCast(u32, func.mir_extra.items.len); + const extra_index = @as(u32, @intCast(func.mir_extra.items.len)); try func.mir_extra.appendSlice(func.gpa, &operands); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); @@ -5015,15 +5015,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const result_ty = func.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(result_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len])); const result: WValue = result_value: { switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const sentinel = if (result_ty.sentinel(mod)) |sent| blk: { break :blk try func.lowerConstant(sent, 
elem_ty); } else null; @@ -5087,7 +5087,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { WValue{ .imm64 = current_bit }; const value = try func.resolveInst(elem); - const value_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const int_ty = try mod.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations @@ -5113,7 +5113,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index, mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); @@ -5174,7 +5174,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, field.ty, 0); } else { - try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size)); + try func.store(result_ptr, payload, field.ty, @as(u32, @intCast(layout.tag_size))); } if (layout.tag_size > 0) { @@ -5187,21 +5187,21 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { result_ptr, tag_int, union_obj.tag_ty, - @intCast(u32, layout.payload_size), + @as(u32, @intCast(layout.payload_size)), ); } } break :result result_ptr; } else { const operand = try func.resolveInst(extra.init); - const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod))); + const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod)))); if (field.ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); + const int_type = try 
mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod)))); const bitcasted = try func.bitcast(field.ty, int_type, operand); const casted = try func.trunc(bitcasted, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } else if (field.ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); + const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod)))); const casted = try func.intcast(operand, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } @@ -5334,7 +5334,7 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // when the tag alignment is smaller than the payload, the field will be stored // after the payload. const offset = if (layout.tag_align < layout.payload_align) blk: { - break :blk @intCast(u32, layout.payload_size); + break :blk @as(u32, @intCast(layout.payload_size)); } else @as(u32, 0); try func.store(union_ptr, new_tag, tag_ty, offset); func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); @@ -5353,7 +5353,7 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // when the tag alignment is smaller than the payload, the field will be stored // after the payload. 
const offset = if (layout.tag_align < layout.payload_align) blk: { - break :blk @intCast(u32, layout.payload_size); + break :blk @as(u32, @intCast(layout.payload_size)); } else @as(u32, 0); const tag = try func.load(operand, tag_ty, offset); const result = try tag.toLocal(func, tag_ty); @@ -5458,7 +5458,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi operand, .{ .imm32 = 0 }, Type.anyerror, - @intCast(u32, errUnionErrorOffset(payload_ty, mod)), + @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))), ); const result = result: { @@ -5466,7 +5466,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi break :result func.reuseOperand(ty_op.operand, operand); } - break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new); + break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new); }; func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5483,7 +5483,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = if (field_offset != 0) result: { const base = try func.buildPointerOffset(field_ptr, 0, .new); try func.addLabel(.local_get, base.local.value); - try func.addImm32(@bitCast(i32, @intCast(u32, field_offset))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(field_offset))))); try func.addTag(.i32_sub); try func.addLabel(.local_set, base.local.value); break :result base; @@ -5514,14 +5514,14 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice_len = try func.sliceLen(dst); if (ptr_elem_ty.abiSize(mod) != 1) { try func.emitWValue(slice_len); - try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) }); + try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) }); try func.addTag(.i32_mul); try func.addLabel(.local_set, slice_len.local.value); } break :blk slice_len; }, .One 
=> @as(WValue, .{ - .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)), + .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))), }), .C, .Many => unreachable, }; @@ -5611,7 +5611,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); switch (func.arch()) { .wasm32 => { - try func.addImm32(@bitCast(i32, @intCast(u32, abi_size))); + try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(abi_size))))); try func.addTag(.i32_mul); try func.addTag(.i32_add); }, @@ -5708,7 +5708,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -5830,7 +5830,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -6005,7 +6005,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(mod)); + const offset = @as(u32, @intCast(lhs_ty.abiSize(mod))); try func.store(result_ptr, overflow_bit, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -6149,7 +6149,7 @@ fn airCtz(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { switch (wasm_bits) { 32 => { if (wasm_bits != int_info.bits) { - const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits); + const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits)); // leave value on the stack _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or"); } else try func.emitWValue(operand); @@ -6157,7 +6157,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => { if (wasm_bits != int_info.bits) { - const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits); + const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits)); // leave value on the stack _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or"); } else try func.emitWValue(operand); @@ -6172,7 +6172,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i64_ctz); _ = try func.load(operand, Type.u64, 8); if (wasm_bits != int_info.bits) { - try func.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64)); + try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64))); try func.addTag(.i64_or); } try func.addTag(.i64_ctz); @@ -6275,7 +6275,7 @@ fn lowerTry( // check if the error tag is set for the error union. 
try func.emitWValue(err_union); if (pl_has_bits) { - const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); + const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))); try func.addMemArg(.i32_load16_u, .{ .offset = err_union.offset() + err_offset, .alignment = Type.anyerror.abiAlignment(mod), @@ -6300,7 +6300,7 @@ fn lowerTry( return WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod)); + const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))); if (isByRef(pl_ty, mod)) { return buildPointerOffset(func, err_union, pl_offset, .new); } @@ -6590,9 +6590,9 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty); defer bin_result.free(func); if (wasm_bits != int_info.bits and op == .add) { - const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1); + const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1)); const imm_val = switch (wasm_bits) { - 32 => WValue{ .imm32 = @intCast(u32, val) }, + 32 => WValue{ .imm32 = @as(u32, @intCast(val)) }, 64 => WValue{ .imm64 = val }, else => unreachable, }; @@ -6603,7 +6603,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } else { switch (wasm_bits) { 32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0), - 64 => try func.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0), + 64 => try func.addImm64(if (op == .add) @as(u64, @bitCast(@as(i64, -1))) else 0), else => unreachable, } try func.emitWValue(bin_result); @@ -6629,16 +6629,16 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, break :rhs try (try func.signAbsValue(rhs_operand, ty)).toLocal(func, ty); } else rhs_operand; - const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1); - const min_val: i64 = (-@intCast(i64, @intCast(u63, 
max_val))) - 1; + const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1)); + const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1; const max_wvalue = switch (wasm_bits) { - 32 => WValue{ .imm32 = @truncate(u32, max_val) }, + 32 => WValue{ .imm32 = @as(u32, @truncate(max_val)) }, 64 => WValue{ .imm64 = max_val }, else => unreachable, }; const min_wvalue = switch (wasm_bits) { - 32 => WValue{ .imm32 = @bitCast(u32, @truncate(i32, min_val)) }, - 64 => WValue{ .imm64 = @bitCast(u64, min_val) }, + 32 => WValue{ .imm32 = @as(u32, @bitCast(@as(i32, @truncate(min_val)))) }, + 64 => WValue{ .imm64 = @as(u64, @bitCast(min_val)) }, else => unreachable, }; @@ -6715,11 +6715,11 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => blk: { if (!is_signed) { - try func.addImm64(@bitCast(u64, @as(i64, -1))); + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))); break :blk; } - try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64)))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64))))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64))))); _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt); try func.addTag(.select); }, @@ -6759,12 +6759,12 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, 64 => blk: { if (!is_signed) { - try func.addImm64(@bitCast(u64, @as(i64, -1))); + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))); break :blk; } - try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64)))); - try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64)))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64))))); + try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64))))); _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt); try func.addTag(.select); }, @@ -6894,7 +6894,7 @@ fn getTagNameFunction(func: *CodeGen, 
enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. @@ -6953,7 +6953,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.i32_const)); try relocs.append(.{ .relocation_type = .R_WASM_MEMORY_ADDR_LEB, - .offset = @intCast(u32, body_list.items.len), + .offset = @as(u32, @intCast(body_list.items.len)), .index = tag_sym_index, }); try writer.writeAll(&[_]u8{0} ** 5); // will be relocated @@ -6965,7 +6965,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // store length try writer.writeByte(std.wasm.opcode(.i32_const)); - try leb.writeULEB128(writer, @intCast(u32, tag_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(tag_name.len))); try writer.writeByte(std.wasm.opcode(.i32_store)); try leb.writeULEB128(writer, encoded_alignment); try leb.writeULEB128(writer, @as(u32, 4)); @@ -6974,7 +6974,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.i64_const)); try relocs.append(.{ .relocation_type = .R_WASM_MEMORY_ADDR_LEB64, - .offset = @intCast(u32, body_list.items.len), + .offset = @as(u32, @intCast(body_list.items.len)), .index = tag_sym_index, }); try writer.writeAll(&[_]u8{0} ** 10); // will be relocated @@ -6986,7 +6986,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // store length try writer.writeByte(std.wasm.opcode(.i64_const)); - try leb.writeULEB128(writer, @intCast(u64, tag_name.len)); + try 
leb.writeULEB128(writer, @as(u64, @intCast(tag_name.len))); try writer.writeByte(std.wasm.opcode(.i64_store)); try leb.writeULEB128(writer, encoded_alignment); try leb.writeULEB128(writer, @as(u32, 8)); @@ -7026,7 +7026,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; for (names) |name| { - const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); + const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; @@ -7054,11 +7054,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // lower operand to determine jump table target try func.emitWValue(operand); - try func.addImm32(@intCast(i32, lowest.?)); + try func.addImm32(@as(i32, @intCast(lowest.?))); try func.addTag(.i32_sub); // Account for default branch so always add '1' - const depth = @intCast(u32, highest.? - lowest.? + 1); + const depth = @as(u32, @intCast(highest.? - lowest.? 
+ 1)); const jump_table: Mir.JumpTable = .{ .length = depth }; const table_extra_index = try func.addExtra(jump_table); try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } }); @@ -7155,7 +7155,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_and); const and_result = try WValue.toLocal(.stack, func, Type.bool); const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod))); + try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod)))); try func.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { @@ -7221,13 +7221,13 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.emitWValue(value); if (op == .Nand) { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; const and_res = try func.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) try func.addImm32(-1) else if (wasm_bits == 64) - try func.addImm64(@bitCast(u64, @as(i64, -1))) + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))) else return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{}); _ = try func.binOp(and_res, .stack, ty, .xor); @@ -7352,14 +7352,14 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; + const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; try func.emitWValue(ptr); const and_res = try func.binOp(result, operand, ty, .@"and"); if (wasm_bits == 32) try func.addImm32(-1) else if (wasm_bits == 64) - try func.addImm64(@bitCast(u64, @as(i64, -1))) + try func.addImm64(@as(u64, @bitCast(@as(i64, -1)))) else return func.fail("TODO: `@atomicRmw` with operator 
`Nand` for types larger than 64 bits", .{}); _ = try func.binOp(and_res, .stack, ty, .xor); diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 3314f4d993be..3b1911b895da 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -45,7 +45,7 @@ pub fn emitMir(emit: *Emit) InnerError!void { try emit.emitLocals(); for (mir_tags, 0..) |tag, index| { - const inst = @intCast(u32, index); + const inst = @as(u32, @intCast(index)); switch (tag) { // block instructions .block => try emit.emitBlock(tag, inst), @@ -247,7 +247,7 @@ pub fn emitMir(emit: *Emit) InnerError!void { } fn offset(self: Emit) u32 { - return @intCast(u32, self.code.items.len); + return @as(u32, @intCast(self.code.items.len)); } fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @@ -260,7 +260,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { fn emitLocals(emit: *Emit) !void { const writer = emit.code.writer(); - try leb128.writeULEB128(writer, @intCast(u32, emit.locals.len)); + try leb128.writeULEB128(writer, @as(u32, @intCast(emit.locals.len))); // emit the actual locals amount for (emit.locals) |local| { try leb128.writeULEB128(writer, @as(u32, 1)); @@ -324,13 +324,13 @@ fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void { const extra_index = emit.mir.instructions.items(.data)[inst].payload; const value = emit.mir.extraData(Mir.Imm64, extra_index); try emit.code.append(std.wasm.opcode(.i64_const)); - try leb128.writeILEB128(emit.code.writer(), @bitCast(i64, value.data.toU64())); + try leb128.writeILEB128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64()))); } fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void { const value: f32 = emit.mir.instructions.items(.data)[inst].float32; try emit.code.append(std.wasm.opcode(.f32_const)); - try emit.code.writer().writeIntLittle(u32, @bitCast(u32, value)); + try emit.code.writer().writeIntLittle(u32, @as(u32, @bitCast(value))); } fn emitFloat64(emit: *Emit, inst: 
Mir.Inst.Index) !void { @@ -425,7 +425,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void { .offset = mem_offset, .index = mem.pointer, .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64, - .addend = @intCast(i32, mem.offset), + .addend = @as(i32, @intCast(mem.offset)), }); } } @@ -436,7 +436,7 @@ fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.misc_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.MiscOpcode, opcode)) { + switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) { // bulk-memory opcodes .data_drop => { const segment = emit.mir.extra[extra_index + 1]; @@ -475,7 +475,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.simd_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.SimdOpcode, opcode)) { + switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) { .v128_store, .v128_load, .v128_load8_splat, @@ -507,7 +507,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void { .f64x2_extract_lane, .f64x2_replace_lane, => { - try writer.writeByte(@intCast(u8, emit.mir.extra[extra_index + 1])); + try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1]))); }, .i8x16_splat, .i16x8_splat, @@ -526,7 +526,7 @@ fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void { const writer = emit.code.writer(); try emit.code.append(std.wasm.opcode(.atomics_prefix)); try leb128.writeULEB128(writer, opcode); - switch (@enumFromInt(std.wasm.AtomicsOpcode, opcode)) { + switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) { .i32_atomic_load, .i64_atomic_load, .i32_atomic_load8_u, @@ -623,7 +623,7 @@ fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void { if (emit.dbg_output != .dwarf) return; - const delta_line = 
@intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc = emit.offset() - emit.prev_di_offset; // TODO: This must emit a relocation to calculate the offset relative // to the code section start. diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig index 6e93f0fb88b3..2d4f624b22c5 100644 --- a/src/arch/wasm/Mir.zig +++ b/src/arch/wasm/Mir.zig @@ -544,12 +544,12 @@ pub const Inst = struct { /// From a given wasm opcode, returns a MIR tag. pub fn fromOpcode(opcode: std.wasm.Opcode) Tag { - return @enumFromInt(Tag, @intFromEnum(opcode)); // Given `Opcode` is not present as a tag for MIR yet + return @as(Tag, @enumFromInt(@intFromEnum(opcode))); // Given `Opcode` is not present as a tag for MIR yet } /// Returns a wasm opcode from a given MIR tag. pub fn toOpcode(self: Tag) std.wasm.Opcode { - return @enumFromInt(std.wasm.Opcode, @intFromEnum(self)); + return @as(std.wasm.Opcode, @enumFromInt(@intFromEnum(self))); } }; @@ -621,8 +621,8 @@ pub const Imm64 = struct { pub fn fromU64(imm: u64) Imm64 { return .{ - .msb = @truncate(u32, imm >> 32), - .lsb = @truncate(u32, imm), + .msb = @as(u32, @truncate(imm >> 32)), + .lsb = @as(u32, @truncate(imm)), }; } @@ -639,15 +639,15 @@ pub const Float64 = struct { lsb: u32, pub fn fromFloat64(float: f64) Float64 { - const tmp = @bitCast(u64, float); + const tmp = @as(u64, @bitCast(float)); return .{ - .msb = @truncate(u32, tmp >> 32), - .lsb = @truncate(u32, tmp), + .msb = @as(u32, @truncate(tmp >> 32)), + .lsb = @as(u32, @truncate(tmp)), }; } pub fn toF64(self: Float64) f64 { - @bitCast(f64, self.toU64()); + return @as(f64, @bitCast(self.toU64())); } pub fn toU64(self: Float64) u64 { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index edf84089b1fd..4993e3fe45d4 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -329,7 +329,7 @@ pub const MCValue = union(enum) { .load_frame,
.reserved_frame, => unreachable, // not offsettable - .immediate => |imm| .{ .immediate = @bitCast(u64, @bitCast(i64, imm) +% off) }, + .immediate => |imm| .{ .immediate = @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off)) }, .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } }, .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off }, @@ -360,7 +360,7 @@ pub const MCValue = union(enum) { .lea_frame, .reserved_frame, => unreachable, - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr }) else Memory.moffs(.ds, addr), @@ -606,7 +606,7 @@ const FrameAlloc = struct { fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc { assert(math.isPowerOfTwo(alloc_abi.alignment)); return .{ - .abi_size = @intCast(u31, alloc_abi.size), + .abi_size = @as(u31, @intCast(alloc_abi.size)), .abi_align = math.log2_int(u32, alloc_abi.alignment), .ref_count = 0, }; @@ -694,7 +694,7 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| - @intCast(u32, set_align_stack.alignment.toByteUnitsOptional().?) 
+ @as(u32, @intCast(set_align_stack.alignment.toByteUnitsOptional().?)) else 1, }), @@ -979,7 +979,7 @@ fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) { fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = self.gpa; try self.mir_instructions.ensureUnusedCapacity(gpa, 1); - const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const result_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); self.mir_instructions.appendAssumeCapacity(inst); if (inst.tag != .pseudo or switch (inst.ops) { else => true, @@ -1000,11 +1000,11 @@ fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); - const result = @intCast(u32, self.mir_extra.items.len); + const result = @as(u32, @intCast(self.mir_extra.items.len)); inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(u32, @field(extra, field.name)), + i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), }); } @@ -1214,8 +1214,8 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void { .data = .{ .i = .{ .fixes = tag[0], .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, }); @@ -1246,8 +1246,8 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm: .fixes = tag[0], .r1 = reg, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, .ri64 => .{ .rx = .{ @@ -1316,7 +1316,7 @@ fn asmRegisterRegisterRegisterImmediate( .r1 = reg1, .r2 = reg2, .r3 = reg3, - .i = 
@intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), } }, }); } @@ -1339,8 +1339,8 @@ fn asmRegisterRegisterImmediate( .r1 = reg1, .r2 = reg2, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), }, } }, }); @@ -1429,7 +1429,7 @@ fn asmRegisterMemoryImmediate( .data = .{ .rix = .{ .fixes = tag[0], .r1 = reg, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1458,7 +1458,7 @@ fn asmRegisterRegisterMemoryImmediate( .fixes = tag[0], .r1 = reg1, .r2 = reg2, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1490,8 +1490,8 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void { const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .signed => |s| @as(u32, @bitCast(s)), + .unsigned => |u| @as(u32, @intCast(u)), } }); assert(payload + 1 == switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), @@ -1562,7 +1562,7 @@ fn asmMemoryRegisterImmediate( .data = .{ .rix = .{ .fixes = tag[0], .r1 = reg, - .i = @intCast(u8, imm.unsigned), + .i = @as(u8, @intCast(imm.unsigned)), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1617,7 +1617,7 @@ fn gen(self: *Self) InnerError!void { // Eliding the reloc will cause a miscompilation in this case. 
for (self.exitlude_jump_relocs.items) |jmp_reloc| { self.mir_instructions.items(.data)[jmp_reloc].inst.inst = - @intCast(u32, self.mir_instructions.len); + @as(u32, @intCast(self.mir_instructions.len)); } try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); @@ -1739,7 +1739,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { if (builtin.mode == .Debug) { - const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len); + const mir_inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)); try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } @@ -2032,7 +2032,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { var data_off: i32 = 0; for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| { - const index = @intCast(u32, index_usize); + const index = @as(u32, @intCast(index_usize)); const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); @@ -2050,7 +2050,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { exitlude_jump_reloc.* = try self.asmJmpReloc(undefined); try self.performReloc(skip_reloc); - data_off += @intCast(i32, tag_name.len + 1); + data_off += @as(i32, @intCast(tag_name.len + 1)); } try self.airTrap(); @@ -2126,7 +2126,7 @@ fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void { fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { var tomb_bits = self.liveness.getTombBits(inst); for (operands) |op| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; self.processDeath(Air.refToIndexAllowNone(op) orelse continue); @@ -2167,7 +2167,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { const frame_offset = 
self.frame_locs.items(.disp); for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index| - frame_order.* = @enumFromInt(FrameIndex, frame_index); + frame_order.* = @as(FrameIndex, @enumFromInt(frame_index)); { const SortContext = struct { frame_align: @TypeOf(frame_align), @@ -2195,7 +2195,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { } } - var rbp_offset = @intCast(i32, save_reg_list.count() * 8); + var rbp_offset = @as(i32, @intCast(save_reg_list.count() * 8)); self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false); self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false); self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false); @@ -2210,22 +2210,22 @@ fn computeFrameLayout(self: *Self) !FrameLayout { rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@intFromEnum(FrameIndex.call_frame)] = - @intCast(u31, rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]); + @as(u31, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)])); return .{ .stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0), - .stack_adjust = @intCast(u32, rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]), + .stack_adjust = @as(u32, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)])), .save_reg_list = save_reg_list, }; } fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 { const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align; - return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1)); + return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1)); } fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 { - return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @intCast(u31, frame_addr.off); + return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @as(u31, 
@intCast(frame_addr.off)); } fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { @@ -2245,7 +2245,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { _ = self.free_frame_indices.swapRemoveAt(free_i); return frame_index; } - const frame_index = @enumFromInt(FrameIndex, self.frame_allocs.len); + const frame_index = @as(FrameIndex, @enumFromInt(self.frame_allocs.len)); try self.frame_allocs.append(self.gpa, alloc); return frame_index; } @@ -2321,7 +2321,7 @@ const State = struct { fn initRetroactiveState(self: *Self) State { var state: State = undefined; - state.inst_tracking_len = @intCast(u32, self.inst_tracking.count()); + state.inst_tracking_len = @as(u32, @intCast(self.inst_tracking.count())); state.scope_generation = self.scope_generation; return state; } @@ -2393,7 +2393,7 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt } { const reg = RegisterManager.regAtTrackedIndex( - @intCast(RegisterManager.RegisterBitSet.ShiftInt, index), + @as(RegisterManager.RegisterBitSet.ShiftInt, @intCast(index)), ); self.register_manager.freeReg(reg); self.register_manager.getRegAssumeFree(reg, target_maybe_inst); @@ -2628,7 +2628,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const dst_ty = self.typeOfIndex(inst); const dst_int_info = dst_ty.intInfo(mod); - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2706,9 +2706,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const 
src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2753,13 +2753,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }); const elem_ty = src_ty.childType(mod); - const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - dst_info.bits))); const splat_ty = try mod.vectorType(.{ - .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .len = @as(u32, @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits))), .child = elem_ty.ip_index, }); - const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod)); + const splat_abi_size = @as(u32, @intCast(splat_ty.abiSize(mod))); const splat_val = try mod.intern(.{ .aggregate = .{ .ty = splat_ty.ip_index, @@ -2834,7 +2834,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(mod)), + @as(i32, @intCast(ptr_ty.abiSize(mod))), len_ty, len, ); @@ -2875,7 +2875,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; const src_int = src_val.toBigInt(&space, mod); - return @intCast(u16, src_int.bitCountTwosComp()) + + return @as(u16, @intCast(src_int.bitCountTwosComp())) + @intFromBool(src_int.positive and dst_info.signedness == .signed); }, .intcast => { @@ -2964,7 +2964,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + .immediate 
= (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); if (reg_extra_bits > 0) { const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); @@ -2983,7 +2983,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - ty.bitSize(mod))), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -2994,7 +2994,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3043,7 +3043,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); if (reg_extra_bits > 0) { const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); @@ -3066,7 +3066,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3114,18 +3114,18 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ - 
.immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1, }); break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - reg_bits)), }); break :cc .c; }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_mcv.register, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3172,13 +3172,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), ty, partial_mcv, ); @@ -3245,13 +3245,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), tuple_ty.structFieldType(0, mod), partial_mcv, ); @@ -3319,7 +3319,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); + const payload_off = @as(i32, 
@intCast(tuple_ty.structFieldOffset(0, mod))); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3329,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); @@ -3386,13 +3386,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3416,7 +3416,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3456,7 +3456,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Clobbers .rax and .rdx registers. 
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, @@ -3595,7 +3595,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(mod); - const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); + const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3628,7 +3628,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); if (err_off > 0) { - const shift = @intCast(u6, err_off * 8); + const shift = @as(u6, @intCast(err_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, err_union_ty, @@ -3642,7 +3642,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { }, .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, err_off), + .off = frame_addr.off + @as(i32, @intCast(err_off)), } }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } @@ -3674,7 +3674,7 @@ fn genUnwrapErrorUnionPayloadMir( switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, payload_off), + .off = frame_addr.off + @as(i32, @intCast(payload_off)), } }, .register => |reg| { // TODO reuse operand @@ -3686,7 +3686,7 @@ fn genUnwrapErrorUnionPayloadMir( else .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; if 
(payload_off > 0) { - const shift = @intCast(u6, payload_off * 8); + const shift = @as(u6, @intCast(payload_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, err_union_ty, @@ -3727,8 +3727,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); - const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3766,8 +3766,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3793,8 +3793,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); - const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3814,8 +3814,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| 
self.register_manager.unlockReg(lock); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3864,14 +3864,14 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); + const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); switch (opt_mcv) { else => unreachable, .register => |opt_reg| try self.asmRegisterImmediate( .{ ._s, .bt }, opt_reg, - Immediate.u(@intCast(u6, pl_abi_size * 8)), + Immediate.u(@as(u6, @intCast(pl_abi_size * 8))), ), .load_frame => |frame_addr| try self.asmMemoryImmediate( @@ -3903,8 +3903,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3925,8 +3925,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); - 
const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); + const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3988,7 +3988,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -4165,7 +4165,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { // additional `mov` is needed at the end to get the actual value const elem_ty = ptr_ty.elemType2(mod); - const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -4305,7 +4305,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| { if (tag_abi_size <= 8) { const off: i32 = if (layout.tag_align < layout.payload_align) - @intCast(i32, layout.payload_size) + @as(i32, @intCast(layout.payload_size)) else 0; break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{ @@ -4317,13 +4317,13 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }, .register => { const shift: u6 = if (layout.tag_align < layout.payload_align) - @intCast(u6, layout.payload_size * 8) + @as(u6, @intCast(layout.payload_size * 8)) else 0; const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand); try self.genShiftBinOpMir(.{ ._r, .sh }, 
Type.usize, result, .{ .immediate = shift }); break :blk MCValue{ - .register = registerAlias(result.register, @intCast(u32, layout.tag_size)), + .register = registerAlias(result.register, @as(u32, @intCast(layout.tag_size))), }; }, else => return self.fail("TODO implement get_union_tag for {}", .{operand}), @@ -4420,7 +4420,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(imm_reg, cmov_abi_size), @@ -4430,7 +4430,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 }); } else { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)), + .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - self.regBitSize(dst_ty))), }); const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg); defer self.register_manager.unlockReg(imm_lock); @@ -4447,7 +4447,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .{ .register = wide_reg }, ); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(imm_reg, cmov_abi_size), registerAlias(dst_reg, cmov_abi_size), @@ -4501,8 +4501,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, wide_ty, tmp_mcv, - .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) << - @intCast(u6, src_bits) }, + .{ .immediate = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - extra_bits))) << 
+ @as(u6, @intCast(src_bits)) }, ); break :masked tmp_mcv; } else mat_src_mcv; @@ -4519,7 +4519,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, Type.u64, dst_mcv, - .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) }, + .{ .immediate = @as(u64, math.maxInt(u64)) << @as(u6, @intCast(src_bits - 64)) }, ); break :masked dst_mcv; } else mat_src_mcv.address().offset(8).deref(); @@ -4547,7 +4547,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(width_reg, cmov_abi_size), @@ -4563,7 +4563,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4588,7 +4588,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); const imm_0_1 = Immediate.u(mask / 0b1_1); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); @@ -4754,7 +4754,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, 
src_ty.abiSize(mod)); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4774,7 +4774,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { else undefined; - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0_1 = Immediate.u(mask / 0b1_1); @@ -5017,7 +5017,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5057,7 +5057,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.typeOf(un_op); - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5123,7 +5123,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { .{ .v_ps, .cvtph2 }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( - @intCast(u32, @divExact(wide_reg.bitSize(), 16)), + @as(u32, @intCast(@divExact(wide_reg.bitSize(), 16))), )), ) else try self.asmRegisterRegister( .{ .v_ps, .cvtph2 }, @@ -5255,10 +5255,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn const ptr_info = 
ptr_ty.ptrInfo(mod); const val_ty = ptr_info.child.toType(); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const val_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); + const val_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const val_extra_bits = self.regExtraBits(val_ty); @@ -5404,7 +5404,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const limb_abi_bits = limb_abi_size * 8; const src_bit_size = src_ty.bitSize(mod); - const src_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); + const src_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); @@ -5421,13 +5421,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In .disp = src_byte_off + limb_i * limb_abi_bits, }); - const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - part_bit_size)) << - @intCast(u6, part_bit_off); + const part_mask = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - part_bit_size))) << + @as(u6, @intCast(part_bit_off)); const part_mask_not = part_mask ^ - (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_abi_bits)); + (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_abi_bits))); if (limb_abi_size <= 4) { try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not)); - } else if (math.cast(i32, @bitCast(i64, part_mask_not))) |small| { + } else if (math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| { try 
self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.s(small)); } else { const part_mask_reg = try self.register_manager.allocReg(null, gp); @@ -5542,14 +5542,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); - const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) { + const field_offset = @as(i32, @intCast(switch (container_ty.containerLayout(mod)) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0) container_ty.packedStructFieldByteOffset(index, mod) else 0, - }); + })); const src_mcv = try self.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { @@ -5577,7 +5577,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout(mod)) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), + .Auto, .Extern => @as(u32, @intCast(container_ty.structFieldOffset(index, mod) * 8)), .Packed => if (mod.typeToStruct(container_ty)) |struct_obj| struct_obj.packedFieldBitOffset(mod, index) else @@ -5588,7 +5588,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| { if (field_off % 8 == 0) { const off_mcv = - src_mcv.address().offset(@intCast(i32, @divExact(field_off, 8))).deref(); + src_mcv.address().offset(@as(i32, @intCast(@divExact(field_off, 8)))).deref(); if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -5596,10 +5596,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @intCast(u32, field_ty.abiSize(mod)); + 
const field_abi_size = @as(u32, @intCast(field_ty.abiSize(mod))); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); + const field_byte_off = @as(i32, @intCast(field_off / limb_abi_bits * limb_abi_size)); const field_bit_off = field_off % limb_abi_bits; if (field_abi_size > 8) { @@ -5643,7 +5643,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { tmp_reg, Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + field_byte_off + @intCast(i32, limb_abi_size), + .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)), }), ); try self.asmRegisterRegisterImmediate( @@ -5724,7 +5724,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(mod); - const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); + const field_offset = @as(i32, @intCast(parent_ty.structFieldOffset(extra.field_index, mod))); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5773,14 +5773,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); + const limb_abi_size = @as(u16, @intCast(@min(src_ty.abiSize(mod), 8))); const int_info = if (src_ty.ip_index == .bool_type) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)); + const limb_bits = @as(u16, @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8))); const limb_ty = try mod.intType(int_info.signedness, 
limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, @@ -5788,7 +5788,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: }; if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits); + const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_bits)); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5801,7 +5801,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5863,7 +5863,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -5886,7 +5886,7 @@ fn genShiftBinOpMir( const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @bitCast(i64, addr)) orelse + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ @tagName(lhs_mcv), @tagName(rhs_mcv), @@ -6151,8 +6151,8 @@ fn genMulDivBinOp( if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); } - const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); - const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); + const 
dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); if (switch (tag) { else => unreachable, .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6326,7 +6326,7 @@ fn genBinOp( const mod = self.bin_file.options.module.?; const lhs_ty = self.typeOf(lhs_air); const rhs_ty = self.typeOf(rhs_air); - const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(lhs_ty.abiSize(mod))); const maybe_mask_reg = switch (air_tag) { else => null, @@ -6481,7 +6481,7 @@ fn genBinOp( .lea_tlv, .lea_frame, => true, - .memory => |addr| math.cast(i32, @bitCast(i64, addr)) == null, + .memory => |addr| math.cast(i32, @as(i64, @bitCast(addr))) == null, else => false, }) .{ .register = try self.copyToTmpRegister(rhs_ty, src_mcv) } else src_mcv; const mat_mcv_lock = switch (mat_src_mcv) { @@ -6506,7 +6506,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2); + const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -6541,7 +6541,7 @@ fn genBinOp( Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, @bitCast(i64, addr)), + .disp = @as(i32, @intCast(@as(i64, @bitCast(addr)))), }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, @@ -7429,7 +7429,7 @@ fn genBinOpMir( src_mcv: MCValue, ) !void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); switch (dst_mcv) { .none, .unreach, @@ -7465,28 +7465,28 @@ fn genBinOpMir( 8 => try self.asmRegisterImmediate( mir_tag, dst_alias, - if (math.cast(i8, @bitCast(i64, imm))) |small| + if (math.cast(i8, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - 
Immediate.u(@intCast(u8, imm)), + Immediate.u(@as(u8, @intCast(imm))), ), 16 => try self.asmRegisterImmediate( mir_tag, dst_alias, - if (math.cast(i16, @bitCast(i64, imm))) |small| + if (math.cast(i16, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u16, imm)), + Immediate.u(@as(u16, @intCast(imm))), ), 32 => try self.asmRegisterImmediate( mir_tag, dst_alias, - if (math.cast(i32, @bitCast(i64, imm))) |small| + if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u32, imm)), + Immediate.u(@as(u32, @intCast(imm))), ), - 64 => if (math.cast(i32, @bitCast(i64, imm))) |small| + 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small)) else try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias( @@ -7602,8 +7602,8 @@ fn genBinOpMir( => null, .memory, .load_got, .load_direct, .load_tlv => src: { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr)) != null and - math.cast(i32, @bitCast(i64, addr) + abi_size - limb_abi_size) != null) + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr))) != null and + math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null) break :src null, .load_got, .load_direct, .load_tlv => {}, else => unreachable, @@ -7680,7 +7680,7 @@ fn genBinOpMir( const imm = switch (off) { 0 => src_imm, else => switch (ty_signedness) { - .signed => @bitCast(u64, @bitCast(i64, src_imm) >> 63), + .signed => @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63)), .unsigned => 0, }, }; @@ -7688,28 +7688,28 @@ fn genBinOpMir( 8 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if (math.cast(i8, @bitCast(i64, imm))) |small| + if (math.cast(i8, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u8, imm)), + Immediate.u(@as(u8, @intCast(imm))), ), 16 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if 
(math.cast(i16, @bitCast(i64, imm))) |small| + if (math.cast(i16, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u16, imm)), + Immediate.u(@as(u16, @intCast(imm))), ), 32 => try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, - if (math.cast(i32, @bitCast(i64, imm))) |small| + if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| Immediate.s(small) else - Immediate.u(@intCast(u32, imm)), + Immediate.u(@as(u32, @intCast(imm))), ), - 64 => if (math.cast(i32, @bitCast(i64, imm))) |small| + 64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| try self.asmMemoryImmediate( mir_limb_tag, dst_limb_mem, @@ -7753,7 +7753,7 @@ fn genBinOpMir( 0 => src_mcv, else => .{ .immediate = 0 }, }, - .memory => |addr| .{ .memory = @bitCast(u64, @bitCast(i64, addr) + off) }, + .memory => |addr| .{ .memory = @as(u64, @bitCast(@as(i64, @bitCast(addr)) + off)) }, .indirect => |reg_off| .{ .indirect = .{ .reg = reg_off.reg, .off = reg_off.off + off, @@ -7780,7 +7780,7 @@ fn genBinOpMir( /// Does not support byte-size operands. 
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); switch (dst_mcv) { .none, .unreach, @@ -7847,7 +7847,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @bitCast(i64, addr)) orelse + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse return self.asmRegisterRegister( .{ .i_, .mul }, dst_alias, @@ -8014,7 +8014,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = atom.getOffsetTableAddress(elf_file); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, got_addr), + .disp = @as(i32, @intCast(got_addr)), })); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(owner_decl); @@ -8124,7 +8124,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = p9.getAtom(atom_index); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, atom.getOffsetTableAddress(p9)), + .disp = @as(i32, 
@intCast(atom.getOffsetTableAddress(p9))), })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { @@ -8244,7 +8244,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const result = MCValue{ .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(mod)); + const abi_size = @as(u16, @intCast(ty.abiSize(mod))); const may_flip: enum { may_flip, must_flip, @@ -8441,7 +8441,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); + const op_abi_size = @as(u32, @intCast(op_ty.abiSize(mod))); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8650,7 +8650,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8670,18 +8670,18 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.ip_index == .bool_type); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); + const opt_abi_size = @as(u32, @intCast(opt_ty.abiSize(mod))); try self.asmRegisterImmediate( .{ ._, .bt }, 
registerAlias(opt_reg, opt_abi_size), - Immediate.u(@intCast(u6, some_info.off * 8)), + Immediate.u(@as(u6, @intCast(some_info.off * 8))), ); return .{ .eflags = .nc }; }, @@ -8696,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8709,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8741,7 +8741,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8750,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); + const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8783,7 +8783,7 @@ fn isErr(self: *Self, 
maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const tmp_reg = try self.copyToTmpRegister(ty, operand); if (err_off > 0) { - const shift = @intCast(u6, err_off * 8); + const shift = @as(u6, @intCast(err_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, ty, @@ -8805,7 +8805,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! Type.anyerror, .{ .load_frame = .{ .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, err_off), + .off = frame_addr.off + @as(i32, @intCast(err_off)), } }, .{ .immediate = 0 }, ), @@ -8943,7 +8943,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const jmp_target = @intCast(u32, self.mir_instructions.len); + const jmp_target = @as(u32, @intCast(self.mir_instructions.len)); self.scope_generation += 1; const state = try self.saveState(); @@ -9015,9 +9015,9 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast( + const items = @as( []const Air.Inst.Ref, - self.air.extra[case.end..][0..case.data.items_len], + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]), ); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -9066,7 +9066,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { } fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { - const next_inst = @intCast(u32, self.mir_instructions.len); + const next_inst = @as(u32, @intCast(self.mir_instructions.len)); switch (self.mir_instructions.items(.tag)[reloc]) { .j, .jmp => {}, .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) { @@ -9141,11 +9141,11 @@ fn airBr(self: 
*Self, inst: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const clobbers_len = @truncate(u31, extra.data.flags); + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; var result: MCValue = .none; @@ -9281,7 +9281,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| { if (mnem_size) |size| { const max = @as(u64, math.maxInt(u64)) >> - @intCast(u6, 64 - (size.bitSize() - 1)); + @as(u6, @intCast(64 - (size.bitSize() - 1))); if ((if (s < 0) ~s else s) > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } @@ -9289,7 +9289,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| { if (mnem_size) |size| { const max = @as(u64, math.maxInt(u64)) >> - @intCast(u6, 64 - size.bitSize()); + @as(u6, @intCast(64 - size.bitSize())); if (u > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } @@ -9618,7 +9618,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError .indirect => |reg_off| try self.genSetMem(.{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv), .memory, .load_direct, .load_got, .load_tlv => { switch (dst_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| + .memory => |addr| if (math.cast(i32, @as(i64, 
@bitCast(addr)))) |small_addr| return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv), .load_direct, .load_got, .load_tlv => {}, else => unreachable, @@ -9641,7 +9641,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9662,11 +9662,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } else if (abi_size > 4 and math.cast(u32, imm) != null) { // 32-bit moves zero-extend to 64-bit. try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), Immediate.u(imm)); - } else if (abi_size <= 4 and @bitCast(i64, imm) < 0) { + } else if (abi_size <= 4 and @as(i64, @bitCast(imm)) < 0) { try self.asmRegisterImmediate( .{ ._, .mov }, registerAlias(dst_reg, abi_size), - Immediate.s(@intCast(i32, @bitCast(i64, imm))), + Immediate.s(@as(i32, @intCast(@as(i64, @bitCast(imm))))), ); } else { try self.asmRegisterImmediate( @@ -9806,7 +9806,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, .memory, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| { + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| { const dst_alias = registerAlias(dst_reg, abi_size); const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = .ds }, @@ -9814,7 +9814,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, - @bitCast(u32, small_addr), + @as(u32, @bitCast(small_addr)), ty.abiAlignment(mod), 
))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), @@ -9928,9 +9928,9 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const dst_ptr_mcv: MCValue = switch (base) { - .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, + .none => .{ .immediate = @as(u64, @bitCast(@as(i64, disp))) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, }; @@ -9941,9 +9941,9 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .immediate => |imm| switch (abi_size) { 1, 2, 4 => { const immediate = if (ty.isSignedInt(mod)) - Immediate.s(@truncate(i32, @bitCast(i64, imm))) + Immediate.s(@as(i32, @truncate(@as(i64, @bitCast(imm))))) else - Immediate.u(@intCast(u32, imm)); + Immediate.u(@as(u32, @intCast(imm))); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), @@ -9951,7 +9951,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ); }, 3, 5...7 => unreachable, - else => if (math.cast(i32, @bitCast(i64, imm))) |small| { + else => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| { try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), @@ -9963,14 +9963,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), if (ty.isSignedInt(mod)) - Immediate.s(@truncate( + Immediate.s(@as( i32, - @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), + 
@truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)), )) else - Immediate.u(@truncate( + Immediate.u(@as( u32, - if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0, + @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0), )), ); }, @@ -9985,13 +9985,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal switch (try self.moveStrategy(ty, switch (base) { .none => mem.isAlignedGeneric( u32, - @bitCast(u32, disp), + @as(u32, @bitCast(disp)), ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, - @bitCast(u32, disp), + @as(u32, @bitCast(disp)), ty.abiAlignment(mod), ), else => false, @@ -10012,13 +10012,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, mod)), + disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))), ty.structFieldType(0, mod), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, mod)), + disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))), ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, ); @@ -10077,7 +10077,7 @@ fn genLazySymbolRef( _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -10099,7 +10099,7 @@ fn genLazySymbolRef( _ = atom.getOrCreateOffsetTableEntry(p9_file); const got_addr = atom.getOffsetTableAddress(p9_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + Memory.sib(.qword, 
.{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -10195,8 +10195,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(mod)); - const bit_size = @intCast(u16, dst_ty.bitSize(mod)); + const abi_size = @as(u16, @intCast(dst_ty.abiSize(mod))); + const bit_size = @as(u16, @intCast(dst_ty.bitSize(mod))); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10237,7 +10237,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(mod)), + @as(i32, @intCast(ptr_ty.abiSize(mod))), Type.usize, .{ .immediate = array_len }, ); @@ -10251,7 +10251,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(mod)); + const src_bits = @as(u32, @intCast(src_ty.bitSize(mod))); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.typeOfIndex(inst); @@ -10306,7 +10306,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.typeOf(ty_op.operand); const dst_ty = self.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); + const dst_bits = @as(u32, @intCast(dst_ty.bitSize(mod))); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10359,7 +10359,7 @@ fn airCmpxchg(self: *Self, inst: 
Air.Inst.Index) !void { const ptr_ty = self.typeOf(extra.ptr); const val_ty = self.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10461,7 +10461,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); + const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,7 +10539,7 @@ fn atomicOp( defer self.register_manager.unlockReg(tmp_lock); try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem); - const loop = @intCast(u32, self.mir_instructions.len); + const loop = @as(u32, @intCast(self.mir_instructions.len)); if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax }); } @@ -10613,7 +10613,7 @@ fn atomicOp( .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 8, })); - const loop = @intCast(u32, self.mir_instructions.len); + const loop = @as(u32, @intCast(self.mir_instructions.len)); const val_mem_mcv: MCValue = switch (val_mcv) { .memory, .indirect, .load_frame => val_mcv, else => .{ .indirect = .{ @@ -10769,7 +10769,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u31, @intCast(elem_ty.abiSize(mod))); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { @@ -11249,9 +11249,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn 
airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); + const len = @as(usize, @intCast(result_ty.arrayLen(mod))); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const result: MCValue = result: { switch (result_ty.zigTypeTag(mod)) { .Struct => { @@ -11268,17 +11268,17 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); + const elem_bit_size = @as(u32, @intCast(elem_ty.bitSize(mod))); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); const elem_abi_bits = elem_abi_size * 8; const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); - const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); + const elem_byte_off = @as(i32, @intCast(elem_off / elem_abi_bits * elem_abi_size)); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { @@ -11330,7 +11330,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { elem_ty, .{ .load_frame = .{ .index = frame_index, - .off = elem_byte_off + @intCast(i32, elem_abi_size), + .off = elem_byte_off + @as(i32, @intCast(elem_abi_size)), } }, .{ .register = reg }, ); @@ -11340,7 +11340,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try 
result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); + const elem_off = @as(i32, @intCast(result_ty.structFieldOffset(elem_i, mod))); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11354,7 +11354,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(mod); - const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11362,12 +11362,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, else => elem_mcv, }; - const elem_off = @intCast(i32, elem_size * elem_i); + const elem_off = @as(i32, @intCast(elem_size * elem_i)); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, elem_size * elements.len), + @as(i32, @intCast(elem_size * elements.len)), elem_ty, try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }), ); @@ -11416,7 +11416,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) - @intCast(i32, layout.payload_size) + @as(i32, @intCast(layout.payload_size)) else 0; try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int }); @@ -11424,7 +11424,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const pl_off = if 
(layout.tag_align < layout.payload_align) 0 else - @intCast(i32, layout.tag_size); + @as(i32, @intCast(layout.tag_size)); try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv); break :result dst_mcv; @@ -11454,7 +11454,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var order = [1]u2{0} ** 3; var unused = std.StaticBitSet(3).initFull(); for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| { - const op_index = @intCast(u2, op_i); + const op_index = @as(u2, @intCast(op_i)); mcv.* = try self.resolveInst(op); if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) { order[op_index] = 1; @@ -11470,7 +11470,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| { if (mop_index.* != 0) continue; - mop_index.* = 1 + @intCast(u2, unused.toggleFirstSet().?); + mop_index.* = 1 + @as(u2, @intCast(unused.toggleFirstSet().?)); if (mop_index.* > 1 and mcv.isRegister()) continue; const reg = try self.copyToTmpRegister(ty, mcv.*); mcv.* = .{ .register = reg }; @@ -11570,7 +11570,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(ty.abiSize(mod))); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11723,7 +11723,7 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). 
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); + result.stack_byte_count += @as(u31, @intCast(4 * Type.usize.abiSize(mod))); }, else => {}, } @@ -11746,7 +11746,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(mod)), + @as(u32, @intCast(ret_ty.abiSize(mod))), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11782,17 +11782,17 @@ fn resolveCallingConventionValues( }, .float, .sse => switch (self.target.os.tag) { .windows => if (param_reg_i < 4) { - arg.* = .{ .register = @enumFromInt( + arg.* = .{ .register = @as( Register, - @intFromEnum(Register.xmm0) + param_reg_i, + @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i), ) }; param_reg_i += 1; continue; }, else => if (param_sse_reg_i < 8) { - arg.* = .{ .register = @enumFromInt( + arg.* = .{ .register = @as( Register, - @intFromEnum(Register.xmm0) + param_sse_reg_i, + @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i), ) }; param_sse_reg_i += 1; continue; @@ -11804,8 +11804,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(mod)); - const param_align = @intCast(u31, ty.abiAlignment(mod)); + const param_size = @as(u31, @intCast(ty.abiSize(mod))); + const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11825,7 +11825,7 @@ fn resolveCallingConventionValues( result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); + const ret_ty_size = @as(u31, @intCast(ret_ty.abiSize(mod))); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register 
= aliased_reg }, .long = .none }; @@ -11844,8 +11844,8 @@ fn resolveCallingConventionValues( arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(mod)); - const param_align = @intCast(u31, ty.abiAlignment(mod)); + const param_size = @as(u31, @intCast(ty.abiSize(mod))); + const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11932,12 +11932,12 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { const mod = self.bin_file.options.module.?; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(mod)), + .bits = @as(u16, @intCast(ty.bitSize(mod))), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { .signed => { - const shift = @intCast(u6, max_reg_bit_width - int_info.bits); + const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); try self.genShiftBinOpMir( .{ ._l, .sa }, Type.isize, @@ -11952,7 +11952,7 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { ); }, .unsigned => { - const shift = @intCast(u6, max_reg_bit_width - int_info.bits); + const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); const mask = (~@as(u64, 0)) >> shift; if (int_info.bits <= 32) { try self.genBinOpMir( diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 78ff918715bc..9c9aadbd13d1 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{ pub fn emitMir(emit: *Emit) Error!void { for (0..emit.lower.mir.instructions.len) |mir_i| { - const mir_index = @intCast(Mir.Inst.Index, mir_i); + const mir_index = @as(Mir.Inst.Index, @intCast(mir_i)); try emit.code_offset_mapping.putNoClobber( emit.lower.allocator, mir_index, - @intCast(u32, emit.code.items.len), + @as(u32, 
@intCast(emit.code.items.len)), ); const lowered = try emit.lower.lowerMir(mir_index); var lowered_relocs = lowered.relocs; for (lowered.insts, 0..) |lowered_inst, lowered_index| { - const start_offset = @intCast(u32, emit.code.items.len); + const start_offset = @as(u32, @intCast(emit.code.items.len)); try lowered_inst.encode(emit.code.writer(), .{}); - const end_offset = @intCast(u32, emit.code.items.len); + const end_offset = @as(u32, @intCast(emit.code.items.len)); while (lowered_relocs.len > 0 and lowered_relocs[0].lowered_inst_index == lowered_index) : ({ lowered_relocs = lowered_relocs[1..]; @@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void { .source = start_offset, .target = target, .offset = end_offset - 4, - .length = @intCast(u5, end_offset - start_offset), + .length = @as(u5, @intCast(end_offset - start_offset)), }), .linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { // Add relocation to the decl. @@ -89,7 +89,7 @@ pub fn emitMir(emit: *Emit) Error!void { else => unreachable, }, .target = .{ .sym_index = symbol.sym_index, .file = null }, - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, .length = 2, @@ -113,7 +113,7 @@ pub fn emitMir(emit: *Emit) Error!void { .linker_import => coff_file.getGlobalByIndex(symbol.sym_index), else => unreachable, }, - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, .length = 2, @@ -122,7 +122,7 @@ pub fn emitMir(emit: *Emit) Error!void { const atom_index = symbol.atom_index; try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct .target = symbol.sym_index, // we set sym_index to just be the atom index - .offset = @intCast(u32, end_offset - 4), + .offset = @as(u32, @intCast(end_offset - 4)), .addend = 0, .pcrel = true, }); @@ -209,13 +209,13 @@ fn 
fixupRelocs(emit: *Emit) Error!void { for (emit.relocs.items) |reloc| { const target = emit.code_offset_mapping.get(reloc.target) orelse return emit.fail("JMP/CALL relocation target not found!", .{}); - const disp = @intCast(i32, @intCast(i64, target) - @intCast(i64, reloc.source + reloc.length)); + const disp = @as(i32, @intCast(@as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length)))); mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp); } } fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void { - const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line); + const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line)); const delta_pc: usize = emit.code.items.len - emit.prev_di_pc; log.debug(" (advance pc={d} and line={d})", .{ delta_line, delta_pc }); switch (emit.debug_output) { @@ -233,22 +233,22 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void { // increasing the line number try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); // increasing the pc - const d_pc_p9 = @intCast(i64, delta_pc) - quant; + const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant; if (d_pc_p9 > 0) { // minus one because if its the last one, we want to leave space to change the line which is one quanta var diff = @divExact(d_pc_p9, quant) - quant; while (diff > 0) { if (diff < 64) { - try dbg_out.dbg_line.append(@intCast(u8, diff + 128)); + try dbg_out.dbg_line.append(@as(u8, @intCast(diff + 128))); diff = 0; } else { - try dbg_out.dbg_line.append(@intCast(u8, 64 + 128)); + try dbg_out.dbg_line.append(@as(u8, @intCast(64 + 128))); diff -= 64; } } if (dbg_out.pcop_change_index.*) |pci| dbg_out.dbg_line.items[pci] += 1; - dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1)); } else if (d_pc_p9 == 0) { // we don't need to do anything, because 
adding the quant does it for us } else unreachable; diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index a3963ca149b9..ca260f5ec4af 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -85,7 +85,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct { rex: Rex, }, modrm_ext: ?u3) ?Encoding { for (mnemonic_to_encodings_map, 0..) |encs, mnemonic_int| for (encs) |data| { - const enc = Encoding{ .mnemonic = @enumFromInt(Mnemonic, mnemonic_int), .data = data }; + const enc = Encoding{ .mnemonic = @as(Mnemonic, @enumFromInt(mnemonic_int)), .data = data }; if (modrm_ext) |ext| if (ext != data.modrm_ext) continue; if (!std.mem.eql(u8, opc, enc.opcode())) continue; if (prefixes.rex.w) { @@ -763,7 +763,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op var cwriter = std.io.countingWriter(std.io.null_writer); inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM. 
- return @intCast(usize, cwriter.bytes_written); + return @as(usize, @intCast(cwriter.bytes_written)); } const mnemonic_to_encodings_map = init: { diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index d77ddf3050be..53aa18295701 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -188,7 +188,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_probe_align_ri_s => { try lower.emit(.none, .@"test", &.{ .{ .reg = inst.data.ri.r1 }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) }, }); try lower.emit(.none, .jz, &.{ .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, @@ -213,7 +213,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }, .pseudo_probe_adjust_unrolled_ri_s => { var offset = page_size; - while (offset < @bitCast(i32, inst.data.ri.i)) : (offset += page_size) { + while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) { try lower.emit(.none, .@"test", &.{ .{ .mem = Memory.sib(.dword, .{ .base = .{ .reg = inst.data.ri.r1 }, @@ -224,14 +224,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { } try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.ri.r1 }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) }, }); assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts); }, .pseudo_probe_adjust_setup_rri_s => { try lower.emit(.none, .mov, &.{ .{ .reg = inst.data.rri.r2.to32() }, - .{ .imm = Immediate.s(@bitCast(i32, inst.data.rri.i)) }, + .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) }, }); try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.rri.r1 }, @@ -289,7 +289,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .i_s, .mi_sib_s, .mi_rip_s, - => Immediate.s(@bitCast(i32, i)), + => Immediate.s(@as(i32, @bitCast(i))), .rrri, .rri_u, diff --git 
a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 36eacf4db93a..7753104b9654 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -989,7 +989,7 @@ pub const RegisterList = struct { fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt { for (registers, 0..) |cpreg, i| { - if (reg.id() == cpreg.id()) return @intCast(u32, i); + if (reg.id() == cpreg.id()) return @as(u32, @intCast(i)); } unreachable; // register not in input register list! } @@ -1009,7 +1009,7 @@ pub const RegisterList = struct { } pub fn count(self: Self) u32 { - return @intCast(u32, self.bitset.count()); + return @as(u32, @intCast(self.bitset.count())); } }; @@ -1023,15 +1023,15 @@ pub const Imm64 = struct { pub fn encode(v: u64) Imm64 { return .{ - .msb = @truncate(u32, v >> 32), - .lsb = @truncate(u32, v), + .msb = @as(u32, @truncate(v >> 32)), + .lsb = @as(u32, @truncate(v)), }; } pub fn decode(imm: Imm64) u64 { var res: u64 = 0; - res |= (@intCast(u64, imm.msb) << 32); - res |= @intCast(u64, imm.lsb); + res |= (@as(u64, @intCast(imm.msb)) << 32); + res |= @as(u64, @intCast(imm.lsb)); return res; } }; @@ -1070,18 +1070,18 @@ pub const MemorySib = struct { } pub fn decode(msib: MemorySib) Memory { - const scale = @truncate(u4, msib.scale_index); + const scale = @as(u4, @truncate(msib.scale_index)); assert(scale == 0 or std.math.isPowerOfTwo(scale)); return .{ .sib = .{ - .ptr_size = @enumFromInt(Memory.PtrSize, msib.ptr_size), - .base = switch (@enumFromInt(Memory.Base.Tag, msib.base_tag)) { + .ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)), + .base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) { .none => .none, - .reg => .{ .reg = @enumFromInt(Register, msib.base) }, - .frame => .{ .frame = @enumFromInt(bits.FrameIndex, msib.base) }, + .reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) }, + .frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) }, }, .scale_index = .{ .scale = scale, - .index = if 
(scale > 0) @enumFromInt(Register, msib.scale_index >> 4) else undefined, + .index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined, }, .disp = msib.disp, } }; @@ -1103,7 +1103,7 @@ pub const MemoryRip = struct { pub fn decode(mrip: MemoryRip) Memory { return .{ .rip = .{ - .ptr_size = @enumFromInt(Memory.PtrSize, mrip.ptr_size), + .ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)), .disp = mrip.disp, } }; } @@ -1120,14 +1120,14 @@ pub const MemoryMoffs = struct { pub fn encode(seg: Register, offset: u64) MemoryMoffs { return .{ .seg = @intFromEnum(seg), - .msb = @truncate(u32, offset >> 32), - .lsb = @truncate(u32, offset >> 0), + .msb = @as(u32, @truncate(offset >> 32)), + .lsb = @as(u32, @truncate(offset >> 0)), }; } pub fn decode(moffs: MemoryMoffs) Memory { return .{ .moffs = .{ - .seg = @enumFromInt(Register, moffs.seg), + .seg = @as(Register, @enumFromInt(moffs.seg)), .offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0, } }; } @@ -1147,7 +1147,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], - i32 => @bitCast(i32, mir.extra[i]), + i32 => @as(i32, @bitCast(mir.extra[i])), else => @compileError("bad field type"), }; i += 1; diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index b4e175f33d71..f1ce3ebeb8b2 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -278,7 +278,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "Otherwise class SSE is used." 
result[result_i] = .sse; } - byte_i += @intCast(usize, field_size); + byte_i += @as(usize, @intCast(field_size)); if (byte_i == 8) { byte_i = 0; result_i += 1; @@ -293,7 +293,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { result_i += field_class.len; // If there are any bytes leftover, we have to try to combine // the next field with them. - byte_i = @intCast(usize, field_size % 8); + byte_i = @as(usize, @intCast(field_size % 8)); if (byte_i != 0) result_i -= 1; } } diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index e232a2db05ff..04b21b9e21c8 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -232,7 +232,7 @@ pub const Register = enum(u7) { else => unreachable, // zig fmt: on }; - return @intCast(u6, @intFromEnum(reg) - base); + return @as(u6, @intCast(@intFromEnum(reg) - base)); } pub fn bitSize(reg: Register) u64 { @@ -291,11 +291,11 @@ pub const Register = enum(u7) { else => unreachable, // zig fmt: on }; - return @truncate(u4, @intFromEnum(reg) - base); + return @as(u4, @truncate(@intFromEnum(reg) - base)); } pub fn lowEnc(reg: Register) u3 { - return @truncate(u3, reg.enc()); + return @as(u3, @truncate(reg.enc())); } pub fn toBitSize(reg: Register, bit_size: u64) Register { @@ -325,19 +325,19 @@ pub const Register = enum(u7) { } pub fn to64(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax))); } pub fn to32(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax))); } pub fn to16(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - 
reg.gpBase() + @intFromEnum(Register.ax))); } pub fn to8(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al))); } fn sseBase(reg: Register) u7 { @@ -350,11 +350,11 @@ pub const Register = enum(u7) { } pub fn to256(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0))); } pub fn to128(reg: Register) Register { - return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)); + return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0))); } /// DWARF register encoding @@ -363,7 +363,7 @@ pub const Register = enum(u7) { .general_purpose => if (reg.isExtended()) reg.enc() else - @truncate(u3, @as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3), + @as(u3, @truncate(@as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3)), .sse => 17 + @as(u6, reg.enc()), .x87 => 33 + @as(u6, reg.enc()), .mmx => 41 + @as(u6, reg.enc()), @@ -610,15 +610,15 @@ pub const Immediate = union(enum) { pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 { return switch (imm) { .signed => |x| switch (bit_size) { - 1, 8 => @bitCast(u8, @intCast(i8, x)), - 16 => @bitCast(u16, @intCast(i16, x)), - 32, 64 => @bitCast(u32, x), + 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))), + 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))), + 32, 64 => @as(u32, @bitCast(x)), else => unreachable, }, .unsigned => |x| switch (bit_size) { - 1, 8 => @intCast(u8, x), - 16 => @intCast(u16, x), - 32 => @intCast(u32, x), + 1, 8 => @as(u8, @intCast(x)), + 16 => @as(u16, @intCast(x)), + 32 => @as(u32, @intCast(x)), 64 => x, else => unreachable, }, diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 
d953a9410d4d..bc4c59dc861c 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -471,7 +471,7 @@ pub const Instruction = struct { } else { try encoder.sib_baseDisp8(dst); } - try encoder.disp8(@truncate(i8, sib.disp)); + try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_SIBDisp32(src); if (mem.scaleIndex()) |si| { @@ -487,7 +487,7 @@ pub const Instruction = struct { try encoder.modRm_indirectDisp0(src, dst); } else if (math.cast(i8, sib.disp)) |_| { try encoder.modRm_indirectDisp8(src, dst); - try encoder.disp8(@truncate(i8, sib.disp)); + try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_indirectDisp32(src, dst); try encoder.disp32(sib.disp); @@ -509,9 +509,9 @@ pub const Instruction = struct { fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void { const raw = imm.asUnsigned(kind.immBitSize()); switch (kind.immBitSize()) { - 8 => try encoder.imm8(@intCast(u8, raw)), - 16 => try encoder.imm16(@intCast(u16, raw)), - 32 => try encoder.imm32(@intCast(u32, raw)), + 8 => try encoder.imm8(@as(u8, @intCast(raw))), + 16 => try encoder.imm16(@as(u16, @intCast(raw))), + 32 => try encoder.imm32(@as(u32, @intCast(raw))), 64 => try encoder.imm64(raw), else => unreachable, } @@ -581,7 +581,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type { /// Encodes legacy prefixes pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void { - if (@bitCast(u16, prefixes) != 0) { + if (@as(u16, @bitCast(prefixes)) != 0) { // Hopefully this path isn't taken very often, so we'll do it the slow way for now // LOCK @@ -891,7 +891,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type { /// /// It is sign-extended to 64 bits by the cpu. 
pub fn disp8(self: Self, disp: i8) !void { - try self.writer.writeByte(@bitCast(u8, disp)); + try self.writer.writeByte(@as(u8, @bitCast(disp))); } /// Encode an 32 bit displacement diff --git a/src/clang.zig b/src/clang.zig index d6a655a7046b..75c7e00c68e5 100644 --- a/src/clang.zig +++ b/src/clang.zig @@ -117,7 +117,7 @@ pub const APFloatBaseSemantics = enum(c_int) { pub const APInt = opaque { pub fn getLimitedValue(self: *const APInt, comptime T: type) T { - return @truncate(T, ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T))); + return @as(T, @truncate(ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T)))); } extern fn ZigClangAPInt_getLimitedValue(*const APInt, limit: u64) u64; }; diff --git a/src/codegen.zig b/src/codegen.zig index 3bd7dca2c68c..9e5ae11a6399 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -108,7 +108,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian _ = target; const bits = @typeInfo(F).Float.bits; const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } }); - const int = @bitCast(Int, f); + const int = @as(Int, @bitCast(f)); mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian); } @@ -143,18 +143,18 @@ pub fn generateLazySymbol( if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; const err_names = mod.global_error_set.keys(); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); + mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); for (err_names) |err_name_nts| { const err_name = mod.intern_pool.stringToSlice(err_name_nts); - mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); + mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); code.appendSliceAssumeCapacity(err_name); 
code.appendAssumeCapacity(0); } - mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); + mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian); return Result.ok; } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; @@ -253,12 +253,12 @@ pub fn generateSymbol( }, .err => |err| { const int = try mod.getErrorValue(err.name); - try code.writer().writeInt(u16, @intCast(u16, int), endian); + try code.writer().writeInt(u16, @as(u16, @intCast(int)), endian); }, .error_union => |error_union| { const payload_ty = typed_value.ty.errorUnionPayload(mod); const err_val = switch (error_union.val) { - .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), + .err_name => |err_name| @as(u16, @intCast(try mod.getErrorValue(err_name))), .payload => @as(u16, 0), }; @@ -397,7 +397,7 @@ pub fn generateSymbol( .ty = array_type.child.toType(), .val = switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], + .elems => |elems| elems[@as(usize, @intCast(index))], .repeated_elem => |elem| elem, }.toValue(), }, code, debug_output, reloc_info)) { @@ -417,7 +417,7 @@ pub fn generateSymbol( .ty = vector_type.child.toType(), .val = switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], + .elems => |elems| elems[@as(usize, @intCast(index))], .repeated_elem => |elem| elem, }.toValue(), }, code, debug_output, reloc_info)) { @@ -509,7 +509,7 @@ pub fn generateSymbol( } else { field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(u16, field_ty.bitSize(mod)); + bits += @as(u16, @intCast(field_ty.bitSize(mod))); } } else { const struct_begin = code.items.len; @@ -642,10 +642,10 @@ fn lowerParentPtr( eu_payload, code, debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + reloc_info.offset(@as(u32, 
@intCast(errUnionPayloadOffset( mod.intern_pool.typeOf(eu_payload).toType(), mod, - ))), + )))), ), .opt_payload => |opt_payload| try lowerParentPtr( bin_file, @@ -661,8 +661,8 @@ fn lowerParentPtr( elem.base, code, debug_output, - reloc_info.offset(@intCast(u32, elem.index * - mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + reloc_info.offset(@as(u32, @intCast(elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod)))), ), .field => |field| { const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child; @@ -684,10 +684,10 @@ fn lowerParentPtr( .struct_type, .anon_struct_type, .union_type, - => @intCast(u32, base_type.toType().structFieldOffset( - @intCast(u32, field.index), + => @as(u32, @intCast(base_type.toType().structFieldOffset( + @as(u32, @intCast(field.index)), mod, - )), + ))), else => unreachable, }), ); @@ -735,8 +735,8 @@ fn lowerDeclRef( }); const endian = target.cpu.arch.endian(); switch (ptr_width) { - 16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian), - 32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, vaddr), endian), + 16 => mem.writeInt(u16, try code.addManyAsArray(2), @as(u16, @intCast(vaddr)), endian), + 32 => mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(vaddr)), endian), 64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian), else => unreachable, } @@ -945,7 +945,7 @@ pub fn genTypedValue( const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .signed => @as(u64, @bitCast(typed_value.val.toSignedInt(mod))), .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8afaae7cfa99..317d77602f19 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -326,7 
+326,7 @@ pub const Function = struct { .cty_idx = try f.typeToIndex(ty, .complete), .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); - return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; + return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) }; } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { @@ -644,7 +644,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. _ = try dg.typeToIndex(base_ty, .complete); const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { - .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod), + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { @@ -662,7 +662,7 @@ pub const DeclGen = struct { try dg.renderCType(writer, ptr_cty); try writer.writeByte(')'); } - switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + switch (fieldLocation(base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) { .begin => try dg.renderParentPtr(writer, field.base, location), .field => |name| { try writer.writeAll("&("); @@ -740,11 +740,11 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{@bitCast(f16, undefPattern(i16))}), - 32 => try writer.print("{x}", .{@bitCast(f32, undefPattern(i32))}), - 64 => try writer.print("{x}", .{@bitCast(f64, undefPattern(i64))}), - 80 => try writer.print("{x}", .{@bitCast(f80, undefPattern(i80))}), - 128 => try writer.print("{x}", .{@bitCast(f128, undefPattern(i128))}), + 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}), + 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}), + 64 => try 
writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}), + 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}), + 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}), else => unreachable, } try writer.writeAll(", "); @@ -1041,11 +1041,11 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), - 128 => repr_val_big.set(@bitCast(u128, f128_val)), + 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))), + 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))), + 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))), + 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))), + 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), else => unreachable, } @@ -1103,11 +1103,11 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), - 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), + 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}), + 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}), + 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}), + 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}), + 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), else => unreachable, }; try writer.writeAll(", "); @@ -1225,11 +1225,11 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod))); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + const s_u8 = @as(u8, @intCast(s.toUnsignedInt(mod))); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1239,7 +1239,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod))); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1840,7 +1840,7 @@ pub 
const DeclGen = struct { decl.ty, .{ .decl = decl_index }, CQualifiers.init(.{ .@"const" = variable.is_const }), - @intCast(u32, decl.alignment.toByteUnits(0)), + @as(u32, @intCast(decl.alignment.toByteUnits(0))), .complete, ); try fwd_decl_writer.writeAll(";\n"); @@ -1907,7 +1907,7 @@ pub const DeclGen = struct { const mod = dg.module; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(mod)), + .bits = @as(u16, @intCast(ty.bitSize(mod))), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -2481,7 +2481,7 @@ fn genExports(o: *Object) !void { if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) |@"export", i| { try fwd_decl_writer.writeAll("zig_export("); - try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); + try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @as(u32, @intCast(i)) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null), fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null), @@ -2510,7 +2510,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| { - const index = @intCast(u32, index_usize); + const index = @as(u32, @intCast(index_usize)); const name = mod.intern_pool.stringToSlice(name_ip); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); @@ -2783,7 +2783,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con // Remember how many locals there were before entering the body so that we can free any that // were newly introduced. 
Any new locals must necessarily be logically free after the then // branch is complete. - const pre_locals_len = @intCast(LocalIndex, f.locals.items.len); + const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len)); for (leading_deaths) |death| { try die(f, inst, Air.indexToRef(death)); @@ -2804,7 +2804,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con // them, unless they were used to store allocs. for (pre_locals_len..f.locals.items.len) |local_i| { - const local_index = @intCast(LocalIndex, local_i); + const local_index = @as(LocalIndex, @intCast(local_i)); if (f.allocs.contains(local_index)) { continue; } @@ -3364,7 +3364,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); + const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod)))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3667,7 +3667,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits)); defer mask.deinit(); - try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits)); + try mask.setTwosCompIntLimit(.max, .unsigned, @as(usize, @intCast(src_bits))); try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); @@ -4096,7 +4096,7 @@ fn airCall( const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len])); const 
resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); @@ -4537,7 +4537,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca wrap_cty = elem_cty.toSignedness(dest_info.signedness); need_bitcasts = wrap_cty.?.tag() == .zig_i128; bits -= 1; - bits %= @intCast(u16, f.byteSize(elem_cty) * 8); + bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8)); bits += 1; } try writer.writeAll(" = "); @@ -4711,7 +4711,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { var extra_index: usize = switch_br.end; for (0..switch_br.data.cases_len) |case_i| { const case = f.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[case.end..][0..case.data.items_len])); const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -4771,13 +4771,13 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); const gpa = f.object.dg.gpa; var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i 
+= inputs.len; const result = result: { @@ -4794,7 +4794,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { break :local local; } else .none; - const locals_begin = @intCast(LocalIndex, f.locals.items.len); + const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len)); const constraints_extra_begin = extra_i; for (outputs) |output| { const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]); @@ -5402,7 +5402,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { inst_ty.intInfo(mod).signedness else .unsigned; - const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod)))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -6033,7 +6033,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6136,7 +6136,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, writer, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const repr_bits = @intCast(u16, ty.abiSize(mod) * 8); + const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8)); const is_float = ty.isRuntimeFloat(); const is_128 = repr_bits == 128; const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty; @@ -6186,7 +6186,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ty = ptr_ty.childType(mod); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + 
mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6226,7 +6226,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const repr_ty = if (ty.isRuntimeFloat()) - mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable else ty; @@ -6574,7 +6574,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("] = "); const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); - const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63)); + const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); @@ -6745,8 +6745,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); - const len = @intCast(usize, inst_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(inst_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len])); const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); defer gpa.free(resolved_elements); @@ -7387,7 +7387,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri fn undefPattern(comptime IntType: type) IntType { const int_info = @typeInfo(IntType).Int; const UnsignedType = std.meta.Int(.unsigned, int_info.bits); - return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3)); + return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3))); } const 
FormatIntLiteralContext = struct { @@ -7438,7 +7438,7 @@ fn formatIntLiteral( } else data.val.toBigInt(&int_buf, mod); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); - const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8); + const c_bits = @as(usize, @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8)); var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined; const one = BigInt.Mutable.init(&one_limbs, 1).toConst(); @@ -7471,7 +7471,7 @@ fn formatIntLiteral( const array_data = data.cty.castTag(.array).?.data; break :info .{ .cty = data.dg.indexToCType(array_data.elem_type), - .count = @intCast(usize, array_data.len), + .count = @as(usize, @intCast(array_data.len)), .endian = target.cpu.arch.endian(), .homogeneous = true, }; @@ -7527,7 +7527,7 @@ fn formatIntLiteral( var c_limb_int_info = std.builtin.Type.Int{ .signedness = undefined, - .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)), + .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))), }; var c_limb_cty: CType = undefined; @@ -7727,7 +7727,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type { fn lowersToArray(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => return true, - else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null, + else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null, }; } @@ -7735,7 +7735,7 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi assert(operands.len <= Liveness.bpi - 1); var tomb_bits = f.liveness.getTombBits(inst); for (operands) |operand| { - const dies = @truncate(u1, tomb_bits) != 0; + const dies = @as(u1, @truncate(tomb_bits)) != 0; tomb_bits >>= 1; if (!dies) continue; try die(f, inst, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index c8ce0be38015..efff2e557c35 100644 --- a/src/codegen/c/type.zig +++ 
b/src/codegen/c/type.zig @@ -138,7 +138,7 @@ pub const CType = extern union { pub fn toIndex(self: Tag) Index { assert(!self.hasPayload()); - return @intCast(Index, @intFromEnum(self)); + return @as(Index, @intCast(@intFromEnum(self))); } pub fn Type(comptime self: Tag) type { @@ -330,7 +330,7 @@ pub const CType = extern union { store: *const Set, pub fn hash(self: @This(), cty: CType) Map.Hash { - return @truncate(Map.Hash, cty.hash(self.store.*)); + return @as(Map.Hash, @truncate(cty.hash(self.store.*))); } pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool { return lhs.eql(rhs); @@ -340,7 +340,7 @@ pub const CType = extern union { map: Map = .{}, pub fn indexToCType(self: Set, index: Index) CType { - if (index < Tag.no_payload_count) return initTag(@enumFromInt(Tag, index)); + if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index))); return self.map.keys()[index - Tag.no_payload_count]; } @@ -362,7 +362,7 @@ pub const CType = extern union { return if (self.map.getIndexAdapted( ty, TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert }, - )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null; + )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null; } }; @@ -376,7 +376,7 @@ pub const CType = extern union { pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { const t = cty.tag(); - if (@intFromEnum(t) < Tag.no_payload_count) return @intCast(Index, @intFromEnum(t)); + if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t))); const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); if (!gop.found_existing) gop.key_ptr.* = cty; @@ -386,7 +386,7 @@ pub const CType = extern union { assert(cty.eql(key.*)); assert(cty.hash(self.set) == key.hash(self.set)); } - return @intCast(Index, Tag.no_payload_count + gop.index); + return @as(Index, @intCast(Tag.no_payload_count + gop.index)); } pub fn typeToIndex( @@ -424,7 +424,7 
@@ pub const CType = extern union { assert(adapter.eql(ty, cty.*)); assert(adapter.hash(ty) == cty.hash(self.set)); } - return @intCast(Index, Tag.no_payload_count + gop.index); + return @as(Index, @intCast(Tag.no_payload_count + gop.index)); } }; @@ -1388,7 +1388,7 @@ pub const CType = extern union { .len = @divExact(abi_size, abi_align), .elem_type = tagFromIntInfo(.{ .signedness = .unsigned, - .bits = @intCast(u16, abi_align * 8), + .bits = @as(u16, @intCast(abi_align * 8)), }).toIndex(), } } }; self.value = .{ .cty = initPayload(&self.storage.seq) }; @@ -1492,7 +1492,7 @@ pub const CType = extern union { if (mod.typeToStruct(ty)) |struct_obj| { try self.initType(struct_obj.backing_int_ty, kind, lookup); } else { - const bits = @intCast(u16, ty.bitSize(mod)); + const bits = @as(u16, @intCast(ty.bitSize(mod))); const int_ty = try mod.intType(.unsigned, bits); try self.initType(int_ty, kind, lookup); } @@ -2299,7 +2299,7 @@ pub const CType = extern union { } pub fn hash(self: @This(), ty: Type) u32 { - return @truncate(u32, self.to64().hash(ty)); + return @as(u32, @truncate(self.to64().hash(ty))); } }; }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 75b8a19e7dfd..dd8abb120577 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -592,7 +592,7 @@ pub const Object = struct { llvm_errors[0] = llvm_slice_ty.getUndef(); for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { const name = mod.intern_pool.stringToSlice(name_nts); - const str_init = o.context.constString(name.ptr, @intCast(c_uint, name.len), .False); + const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); const str_global = o.llvm_module.addGlobal(str_init.typeOf(), ""); str_global.setInitializer(str_init); str_global.setLinkage(.Private); @@ -607,7 +607,7 @@ pub const Object = struct { llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); } - const error_name_table_init = 
llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len)); + const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len))); const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), ""); error_name_table_global.setInitializer(error_name_table_init); @@ -1027,7 +1027,7 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = o.context.intType(abi_size * 8); const alignment = @max( param_ty.abiAlignment(mod), @@ -1053,7 +1053,7 @@ pub const Object = struct { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } @@ -1083,9 +1083,9 @@ pub const Object = struct { const param_llvm_ty = try o.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); - const llvm_ty = o.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); + const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); for (field_types, 0..) 
|_, field_i_usize| { - const field_i = @intCast(c_uint, field_i_usize); + const field_i = @as(c_uint, @intCast(field_i_usize)); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); @@ -1289,11 +1289,11 @@ pub const Object = struct { if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { - const di_func = @ptrCast(*llvm.DISubprogram, di_node); + const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); + const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_global.replaceLinkageName(linkage_name); } @@ -1315,11 +1315,11 @@ pub const Object = struct { if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { - const di_func = @ptrCast(*llvm.DISubprogram, di_node); + const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); + const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_global.replaceLinkageName(linkage_name); } @@ -1390,7 +1390,7 @@ pub const Object = struct { const gop = try o.di_map.getOrPut(gpa, file); errdefer assert(o.di_map.remove(file)); if (gop.found_existing) { - return @ptrCast(*llvm.DIFile, gop.value_ptr.*); + return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*)); } const dir_path_z = d: { var buffer: 
[std.fs.MAX_PATH_BYTES]u8 = undefined; @@ -1514,7 +1514,7 @@ pub const Object = struct { if (@sizeOf(usize) == @sizeOf(u64)) { enumerators[i] = dib.createEnumerator2( field_name_z, - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, int_info.bits, int_info.signedness == .unsigned, @@ -1538,7 +1538,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, enumerators.ptr, - @intCast(c_int, enumerators.len), + @as(c_int, @intCast(enumerators.len)), try o.lowerDebugType(int_ty, .full), "", ); @@ -1713,7 +1713,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(mod), .full), - @intCast(i64, ty.arrayLen(mod)), + @as(i64, @intCast(ty.arrayLen(mod))), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty)); @@ -2018,7 +2018,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang null, // vtable holder "", // unique id @@ -2105,7 +2105,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang null, // vtable holder "", // unique id @@ -2217,7 +2217,7 @@ pub const Object = struct { ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), + @as(c_int, @intCast(di_fields.items.len)), 0, // run time lang "", // unique id ); @@ -2330,7 +2330,7 @@ pub const Object = struct { const fn_di_ty = dib.createSubroutineType( param_di_types.items.ptr, - @intCast(c_int, param_di_types.items.len), + @as(c_int, @intCast(param_di_types.items.len)), 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
@@ -2487,7 +2487,7 @@ pub const Object = struct { } if (fn_info.alignment.toByteUnitsOptional()) |a| { - llvm_fn.setAlignment(@intCast(c_uint, a)); + llvm_fn.setAlignment(@as(c_uint, @intCast(a))); } // Function attributes that are independent of analysis results of the function body. @@ -2710,7 +2710,7 @@ pub const Object = struct { if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try o.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null); - return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); + return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len))); }, .Vector => { const elem_type = try o.lowerType(t.childType(mod)); @@ -2732,7 +2732,7 @@ pub const Object = struct { }; const offset = child_ty.abiSize(mod) + 1; const abi_size = t.abiSize(mod); - const padding = @intCast(c_uint, abi_size - offset); + const padding = @as(c_uint, @intCast(abi_size - offset)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2761,7 +2761,7 @@ pub const Object = struct { std.mem.alignForward(u64, error_size, payload_align) + payload_size; const abi_size = std.mem.alignForward(u64, payload_end, error_align); - const padding = @intCast(c_uint, abi_size - payload_end); + const padding = @as(c_uint, @intCast(abi_size - payload_end)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2774,7 +2774,7 @@ pub const Object = struct { std.mem.alignForward(u64, payload_size, error_align) + error_size; const abi_size = std.mem.alignForward(u64, error_end, payload_align); - const padding = @intCast(c_uint, abi_size - error_end); + const padding = @as(c_uint, @intCast(abi_size - error_end)); if (padding == 0) { return o.context.structType(&fields_buf, 2, .False); } @@ -2811,7 +2811,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = 
o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } const field_llvm_ty = try o.lowerType(field_ty.toType()); @@ -2824,14 +2824,14 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } } llvm_struct_ty.structSetBody( llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), + @as(c_uint, @intCast(llvm_field_types.items.len)), .False, ); @@ -2880,7 +2880,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } const field_llvm_ty = try o.lowerType(field.ty); @@ -2893,14 +2893,14 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } } llvm_struct_ty.structSetBody( llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), + @as(c_uint, @intCast(llvm_field_types.items.len)), llvm.Bool.fromBool(any_underaligned_fields), ); @@ -2914,7 +2914,7 @@ pub const Object = struct { const union_obj = mod.typeToUnion(t).?; if (union_obj.layout == .Packed) { - const bitsize = 
@intCast(c_uint, t.bitSize(mod)); + const bitsize = @as(c_uint, @intCast(t.bitSize(mod))); const int_llvm_ty = o.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -2939,9 +2939,9 @@ pub const Object = struct { break :t llvm_aligned_field_ty; } const padding_len = if (layout.tag_size == 0) - @intCast(c_uint, layout.abi_size - layout.most_aligned_field_size) + @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size)) else - @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); + @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size)); const fields: [2]*llvm.Type = .{ llvm_aligned_field_ty, o.context.intType(8).arrayType(padding_len), @@ -3020,7 +3020,7 @@ pub const Object = struct { }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); try llvm_params.append(o.context.intType(abi_size * 8)); }, .slice => { @@ -3045,7 +3045,7 @@ pub const Object = struct { .float_array => |count| { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); - const field_count = @intCast(c_uint, count); + const field_count = @as(c_uint, @intCast(count)); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); }, @@ -3059,7 +3059,7 @@ pub const Object = struct { return llvm.functionType( llvm_ret_ty, llvm_params.items.ptr, - @intCast(c_uint, llvm_params.items.len), + @as(c_uint, @intCast(llvm_params.items.len)), llvm.Bool.fromBool(fn_info.is_var_args), ); } @@ -3219,7 +3219,7 @@ pub const Object = struct { } if (@sizeOf(usize) == @sizeOf(u64)) { break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, ); } @@ -3234,19 +3234,19 @@ pub const Object = 
struct { const llvm_ty = try o.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); const llvm_i16 = o.context.intType(16); const int = llvm_i16.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod))); const llvm_i32 = o.context.intType(32); const int = llvm_i32.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod))); const llvm_i64 = o.context.intType(64); const int = llvm_i64.constInt(repr, .False); return int.constBitCast(llvm_ty); @@ -3265,7 +3265,7 @@ pub const Object = struct { } }, 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod))); // LLVM seems to require that the lower half of the f128 be placed first // in the buffer. if (native_endian == .Big) { @@ -3343,7 +3343,7 @@ pub const Object = struct { .array_type => switch (aggregate.storage) { .bytes => |bytes| return o.context.constString( bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))), .True, // Don't null terminate. Bytes has the sentinel, if any. 
), .elems => |elem_vals| { @@ -3358,21 +3358,21 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), .True, ); } else { const llvm_elem_ty = try o.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); } }, .repeated_elem => |val| { const elem_ty = tv.ty.childType(mod); const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len = @as(usize, @intCast(tv.ty.arrayLen(mod))); const len_including_sent = len + @intFromBool(sentinel != null); const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); defer gpa.free(llvm_elems); @@ -3393,14 +3393,14 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), .True, ); } else { const llvm_elem_ty = try o.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); } }, @@ -3425,7 +3425,7 @@ pub const Object = struct { } return llvm.constVector( llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + @as(c_uint, @intCast(llvm_elems.len)), ); }, .anon_struct_type => |tuple| { @@ -3450,7 +3450,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3472,7 +3472,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3480,14 +3480,14 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), .False, ); } else { const llvm_struct_ty = try o.lowerType(tv.ty); return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), ); } }, @@ -3498,7 +3498,7 @@ pub const Object = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = o.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits))); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -3510,7 +3510,7 @@ pub const Object = struct { .ty = field.ty, .val = try tv.val.fieldValue(mod, i), }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = o.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) @@ -3547,7 +3547,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty 
= o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3569,7 +3569,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3577,13 +3577,13 @@ pub const Object = struct { if (need_unnamed) { return o.context.constStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), .False, ); } else { return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), + @as(c_uint, @intCast(llvm_fields.items.len)), ); } }, @@ -3616,7 +3616,7 @@ pub const Object = struct { if (!field_ty.hasRuntimeBits(mod)) return llvm_union_ty.constNull(); const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @intCast(u16, field_ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod))); const small_int_ty = o.context.intType(ty_bit_size); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) @@ -3632,7 +3632,7 @@ pub const Object = struct { var need_unnamed: bool = layout.most_aligned_field != field_index; const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @intCast(c_uint, layout.payload_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size)); break :p o.context.intType(8).arrayType(padding_len).getUndef(); } const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); @@ -3641,7 +3641,7 @@ pub const 
Object = struct { if (field_size == layout.payload_size) { break :p field; } - const padding_len = @intCast(c_uint, layout.payload_size - field_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); const fields: [2]*llvm.Value = .{ field, o.context.intType(8).arrayType(padding_len).getUndef(), }; @@ -3706,7 +3706,7 @@ pub const Object = struct { } if (@sizeOf(usize) == @sizeOf(u64)) { break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), + @as(c_uint, @intCast(bigint.limbs.len)), bigint.limbs.ptr, ); } @@ -3799,7 +3799,7 @@ pub const Object = struct { const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @intCast(u32, field_ptr.index); + const field_index = @as(u32, @intCast(field_ptr.index)); const llvm_u32 = o.context.intType(32); switch (parent_ty.zigTypeTag(mod)) { .Union => { @@ -3834,7 +3834,7 @@ pub const Object = struct { var b: usize = 0; for (parent_ty.structFields(mod).values()[0..field_index]) |field| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); + b += @as(usize, @intCast(field.ty.bitSize(mod))); } break :b b; }; @@ -3992,9 +3992,9 @@ pub const Object = struct { ) void { const llvm_attr = o.context.createStringAttribute( name.ptr, - @intCast(c_uint, name.len), + @as(c_uint, @intCast(name.len)), value.ptr, - @intCast(c_uint, value.len), + @as(c_uint, @intCast(value.len)), ); val.addAttributeAtIndex(index, llvm_attr); } @@ -4026,14 +4026,14 @@ pub const Object = struct { .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return null; - return o.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); + return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8))); }, .Bool => return o.context.intType(8), else => return null, }; const bit_count = 
int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8)); + return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8))); } else { return null; } @@ -4051,7 +4051,7 @@ pub const Object = struct { if (param_ty.isPtrAtRuntime(mod)) { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); } } @@ -4550,7 +4550,7 @@ pub const FuncGen = struct { fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const o = self.dg.object; const mod = o.module; const callee_ty = self.typeOf(pl_op.operand); @@ -4638,7 +4638,7 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); + const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = self.context.intType(abi_size * 8); if (isByRef(param_ty, mod)) { @@ -4683,10 +4683,10 @@ pub const FuncGen = struct { break :p p; }; - const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False); + const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False); try llvm_args.ensureUnusedCapacity(it.llvm_types_len); for (llvm_types, 0..) 
|field_ty, i_usize| { - const i = @intCast(c_uint, i_usize); + const i = @as(c_uint, @intCast(i_usize)); const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); load_inst.setAlignment(target.ptrBitWidth() / 8); @@ -4742,7 +4742,7 @@ pub const FuncGen = struct { try o.lowerType(zig_fn_ty), llvm_fn, llvm_args.items.ptr, - @intCast(c_uint, llvm_args.items.len), + @as(c_uint, @intCast(llvm_args.items.len)), toLlvmCallConv(fn_info.cc, target), attr, "", @@ -4788,7 +4788,7 @@ pub const FuncGen = struct { const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { - if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { o.addArgAttr(call, llvm_arg_i, "noalias"); } } @@ -5213,7 +5213,7 @@ pub const FuncGen = struct { phi_node.addIncoming( breaks.items(.val).ptr, breaks.items(.bb).ptr, - @intCast(c_uint, breaks.len), + @as(c_uint, @intCast(breaks.len)), ); return phi_node; } @@ -5379,7 +5379,7 @@ pub const FuncGen = struct { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -5479,7 +5479,7 @@ pub const FuncGen = struct { } } - const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod)); + const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod))); const rt_int_bits = compilerRtIntBits(operand_bits); const rt_int_ty = self.context.intType(rt_int_bits); var extended = e: { @@ -5540,7 +5540,7 @@ pub const FuncGen = struct { } } - const rt_int_bits = 
compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod)))); const ret_ty = self.context.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -5806,12 +5806,12 @@ pub const FuncGen = struct { const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -5828,12 +5828,12 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @intCast(c_uint, 
field_ty.bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -5924,8 +5924,8 @@ pub const FuncGen = struct { fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { const di_scope = self.di_scope orelse return null; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - self.prev_dbg_line = @intCast(c_uint, self.base_line + dbg_stmt.line + 1); - self.prev_dbg_column = @intCast(c_uint, dbg_stmt.column + 1); + self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1)); + self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1)); const inlined_at = if (self.dbg_inlined.items.len > 0) self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc else @@ -5949,7 +5949,7 @@ pub const FuncGen = struct { const cur_debug_location = self.builder.getCurrentDebugLocation2(); try self.dbg_inlined.append(self.gpa, .{ - .loc = @ptrCast(*llvm.DILocation, cur_debug_location), + .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)), .scope = self.di_scope.?, .base_line = self.base_line, }); @@ -6107,13 +6107,13 @@ pub const FuncGen = struct { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += 
outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; var llvm_constraints: std.ArrayListUnmanaged(u8) = .{}; @@ -6390,7 +6390,7 @@ pub const FuncGen = struct { 1 => llvm_ret_types[0], else => self.context.structType( llvm_ret_types.ptr, - @intCast(c_uint, return_count), + @as(c_uint, @intCast(return_count)), .False, ), }; @@ -6398,7 +6398,7 @@ pub const FuncGen = struct { const llvm_fn_ty = llvm.functionType( ret_llvm_ty, llvm_param_types.ptr, - @intCast(c_uint, param_count), + @as(c_uint, @intCast(param_count)), .False, ); const asm_fn = llvm.getInlineAsm( @@ -6416,7 +6416,7 @@ pub const FuncGen = struct { llvm_fn_ty, asm_fn, llvm_param_values.ptr, - @intCast(c_uint, param_count), + @as(c_uint, @intCast(param_count)), .C, .Auto, "", @@ -6433,7 +6433,7 @@ pub const FuncGen = struct { if (llvm_ret_indirect[i]) continue; const output_value = if (return_count > 1) b: { - break :b self.builder.buildExtractValue(call, @intCast(c_uint, llvm_ret_i), ""); + break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), ""); } else call; if (output != .none) { @@ -7315,7 +7315,7 @@ pub const FuncGen = struct { result_vector: *llvm.Value, vector_len: usize, ) !*llvm.Value { - const args_len = @intCast(c_uint, args_vectors.len); + const args_len = @as(c_uint, @intCast(args_vectors.len)); const llvm_i32 = self.context.intType(32); assert(args_len <= 3); @@ -7345,7 +7345,7 @@ pub const FuncGen = struct { const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len); break :b if (alias) |a| a.getAliasee() else null; } orelse b: { - const params_len = @intCast(c_uint, param_types.len); + const params_len = @as(c_uint, @intCast(param_types.len)); const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False); const f = 
o.llvm_module.addFunction(fn_name, fn_type); break :b f; @@ -8319,8 +8319,8 @@ pub const FuncGen = struct { return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false); - const ptr_alignment = @intCast(u32, ptr_info.flags.alignment.toByteUnitsOptional() orelse - ptr_info.child.toType().abiAlignment(mod)); + const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse + ptr_info.child.toType().abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); const elem_llvm_ty = try o.lowerType(elem_ty); @@ -8696,10 +8696,10 @@ pub const FuncGen = struct { const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); + const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len))); for (names) |name| { - const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); + const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); const this_tag_int_value = try o.lowerValue(.{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, err_int), @@ -8779,10 +8779,10 @@ pub const FuncGen = struct { const named_block = self.context.appendBasicBlock(fn_val, "Named"); const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len)); + const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len))); for (enum_type.names, 0..) 
|_, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const this_tag_int_value = int: { break :int try o.lowerValue(.{ .ty = enum_ty, @@ -8855,16 +8855,16 @@ pub const FuncGen = struct { const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len)); + const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); const array_ptr_indices = [_]*llvm.Value{ usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), }; for (enum_type.names, 0..) |name_ip, field_index_usize| { - const field_index = @intCast(u32, field_index_usize); + const field_index = @as(u32, @intCast(field_index_usize)); const name = mod.intern_pool.stringToSlice(name_ip); - const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); + const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); const str_init_llvm_ty = str_init.typeOf(); const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, ""); str_global.setInitializer(str_init); @@ -8986,7 +8986,7 @@ pub const FuncGen = struct { val.* = llvm_i32.getUndef(); } else { const int = elem.toSignedInt(mod); - const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); val.* = llvm_i32.constInt(unsigned, .False); } } @@ -9150,8 +9150,8 @@ pub const FuncGen = struct { const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); + const len = 
@as(usize, @intCast(result_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); const llvm_result_ty = try o.lowerType(result_ty); switch (result_ty.zigTypeTag(mod)) { @@ -9171,7 +9171,7 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -9181,7 +9181,7 @@ pub const FuncGen = struct { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") @@ -9251,7 +9251,7 @@ pub const FuncGen = struct { for (elements, 0..) 
|elem, i| { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), - llvm_usize.constInt(@intCast(c_uint, i), .False), + llvm_usize.constInt(@as(c_uint, @intCast(i)), .False), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveInst(elem); @@ -9260,7 +9260,7 @@ pub const FuncGen = struct { if (array_info.sentinel) |sent_val| { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), - llvm_usize.constInt(@intCast(c_uint, array_info.len), .False), + llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveValue(.{ @@ -9289,10 +9289,10 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); + const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); const small_int_ty = self.context.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") @@ -9332,13 +9332,13 @@ pub const FuncGen = struct { const llvm_union_ty = t: { const payload = p: { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @intCast(c_uint, layout.payload_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size)); break :p self.context.intType(8).arrayType(padding_len); } if (field_size == layout.payload_size) { break :p field_llvm_ty; } - const padding_len = @intCast(c_uint, layout.payload_size - field_size); + const padding_len = @as(c_uint, @intCast(layout.payload_size - 
field_size)); const fields: [2]*llvm.Type = .{ field_llvm_ty, self.context.intType(8).arrayType(padding_len), }; @@ -9766,8 +9766,8 @@ pub const FuncGen = struct { const elem_ty = info.child.toType(); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - const ptr_alignment = @intCast(u32, info.flags.alignment.toByteUnitsOptional() orelse - elem_ty.abiAlignment(mod)); + const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse + elem_ty.abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); assert(info.flags.vector_index != .runtime); @@ -9799,7 +9799,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9872,7 +9872,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); + const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit @@ -9945,7 +9945,7 @@ pub const FuncGen = struct { if (!target_util.hasValgrindSupport(target)) return default_value; const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod)); + const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); const array_llvm_ty = 
usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { @@ -9957,7 +9957,7 @@ pub const FuncGen = struct { const zero = usize_llvm_ty.constInt(0, .False); for (array_elements, 0..) |elem, i| { const indexes = [_]*llvm.Value{ - zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False), + zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False), }; const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, ""); const store_inst = fg.builder.buildStore(elem, elem_ptr); @@ -10530,7 +10530,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { assert(classes[0] == .direct and classes[1] == .none); const scalar_type = wasm_c_abi.scalarType(return_type, mod); const abi_size = scalar_type.abiSize(mod); - return o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); }, .aarch64, .aarch64_be => { switch (aarch64_c_abi.classifyType(return_type, mod)) { @@ -10539,7 +10539,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { .byval => return o.lowerType(return_type), .integer => { const bit_size = return_type.bitSize(mod); - return o.context.intType(@intCast(c_uint, bit_size)); + return o.context.intType(@as(c_uint, @intCast(bit_size))); }, .double_integer => return o.context.intType(64).arrayType(2), } @@ -10560,7 +10560,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { .memory => return o.context.voidType(), .integer => { const bit_size = return_type.bitSize(mod); - return o.context.intType(@intCast(c_uint, bit_size)); + return o.context.intType(@as(c_uint, @intCast(bit_size))); }, .double_integer => { var llvm_types_buffer: [2]*llvm.Type = .{ @@ -10598,7 +10598,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { return o.lowerType(return_type); } else { const abi_size = return_type.abiSize(mod); - return 
o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); } }, .win_i128 => return o.context.intType(64).vectorType(2), @@ -10656,7 +10656,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type } if (classes[0] == .integer and classes[1] == .none) { const abi_size = return_type.abiSize(mod); - return o.context.intType(@intCast(c_uint, abi_size * 8)); + return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); } return o.context.structType(&llvm_types_buffer, llvm_types_index, .False); } @@ -11145,28 +11145,28 @@ const AnnotatedDITypePtr = enum(usize) { fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); - assert(@truncate(u1, addr) == 0); - return @enumFromInt(AnnotatedDITypePtr, addr | 1); + assert(@as(u1, @truncate(addr)) == 0); + return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1)); } fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); - return @enumFromInt(AnnotatedDITypePtr, addr); + return @as(AnnotatedDITypePtr, @enumFromInt(addr)); } fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); const bit = @intFromBool(resolve == .fwd); - return @enumFromInt(AnnotatedDITypePtr, addr | bit); + return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit)); } fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType { const fixed_addr = @intFromEnum(self) & ~@as(usize, 1); - return @ptrFromInt(*llvm.DIType, fixed_addr); + return @as(*llvm.DIType, @ptrFromInt(fixed_addr)); } fn isFwdOnly(self: AnnotatedDITypePtr) bool { - return @truncate(u1, @intFromEnum(self)) != 0; + return @as(u1, @truncate(@intFromEnum(self))) != 0; } }; diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index a8249a870fad..b093588e80c1 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -8,7 +8,7 @@ pub const Bool = 
enum(c_int) { _, pub fn fromBool(b: bool) Bool { - return @enumFromInt(Bool, @intFromBool(b)); + return @as(Bool, @enumFromInt(@intFromBool(b))); } pub fn toBool(b: Bool) bool { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index d81ca9a015fb..220909476fec 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -466,7 +466,7 @@ pub const DeclGen = struct { unused.* = undef; } - const word = @bitCast(Word, self.partial_word.buffer); + const word = @as(Word, @bitCast(self.partial_word.buffer)); const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word); try self.members.append(self.u32_ty_ref); try self.initializers.append(result_id); @@ -482,7 +482,7 @@ pub const DeclGen = struct { } fn addUndef(self: *@This(), amt: u64) !void { - for (0..@intCast(usize, amt)) |_| { + for (0..@as(usize, @intCast(amt))) |_| { try self.addByte(undef); } } @@ -539,13 +539,13 @@ pub const DeclGen = struct { const mod = self.dg.module; const int_info = ty.intInfo(mod); const int_bits = switch (int_info.signedness) { - .signed => @bitCast(u64, val.toSignedInt(mod)), + .signed => @as(u64, @bitCast(val.toSignedInt(mod))), .unsigned => val.toUnsignedInt(mod), }; // TODO: Swap endianess if the compiler is big endian. 
const len = ty.abiSize(mod); - try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&int_bits)[0..@as(usize, @intCast(len))]); } fn addFloat(self: *@This(), ty: Type, val: Value) !void { @@ -557,15 +557,15 @@ pub const DeclGen = struct { switch (ty.floatBits(target)) { 16 => { const float_bits = val.toFloat(f16, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, 32 => { const float_bits = val.toFloat(f32, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, 64 => { const float_bits = val.toFloat(f64, mod); - try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); + try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]); }, else => unreachable, } @@ -664,7 +664,7 @@ pub const DeclGen = struct { .int => try self.addInt(ty, val), .err => |err| { const int = try mod.getErrorValue(err.name); - try self.addConstInt(u16, @intCast(u16, int)); + try self.addConstInt(u16, @as(u16, @intCast(int))); }, .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); @@ -755,10 +755,10 @@ pub const DeclGen = struct { switch (aggregate.storage) { .bytes => |bytes| try self.addBytes(bytes), .elems, .repeated_elem => { - for (0..@intCast(usize, array_type.len)) |i| { + for (0..@as(usize, @intCast(array_type.len))) |i| { try self.lower(elem_ty, switch (aggregate.storage) { .bytes => unreachable, - .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .elems => |elem_vals| elem_vals[@as(usize, @intCast(i))].toValue(), .repeated_elem => |elem_val| elem_val.toValue(), }); } @@ -1132,7 +1132,7 @@ pub const DeclGen = struct { const payload_padding_len = layout.payload_size - active_field_size; if (payload_padding_len != 0) { - const 
payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref); + const payload_padding_ty_ref = try self.spv.arrayType(@as(u32, @intCast(payload_padding_len)), u8_ty_ref); member_types.appendAssumeCapacity(payload_padding_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding")); } @@ -1259,7 +1259,7 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .vector_type = .{ .component_type = try self.resolveType(ty.childType(mod), repr), - .component_count = @intCast(u32, ty.vectorLen(mod)), + .component_count = @as(u32, @intCast(ty.vectorLen(mod))), } }); }, .Struct => { @@ -1588,7 +1588,7 @@ pub const DeclGen = struct { init_val, actual_storage_class, final_storage_class == .Generic, - @intCast(u32, decl.alignment.toByteUnits(0)), + @as(u32, @intCast(decl.alignment.toByteUnits(0))), ); } } @@ -1856,7 +1856,7 @@ pub const DeclGen = struct { } fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef { - const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1; + const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1; const result_id = self.spv.allocId(); const mask_id = try self.spv.constInt(ty_ref, mask_value); try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{ @@ -2063,7 +2063,7 @@ pub const DeclGen = struct { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { const int = elem.toSignedInt(mod); - const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); + const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } } @@ -2689,7 +2689,7 @@ pub const DeclGen = struct { // are not allowed to be created from a phi node, and throw an error for those. 
const result_type_id = try self.resolveTypeId(ty); - try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... + try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(incoming_blocks.items.len * 2))); // result type + result + variable/parent... self.func.body.writeOperand(spec.IdResultType, result_type_id); self.func.body.writeOperand(spec.IdRef, result_id); @@ -3105,7 +3105,7 @@ pub const DeclGen = struct { while (case_i < num_cases) : (case_i += 1) { // SPIR-V needs a literal here, which' width depends on the case condition. const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -3116,7 +3116,7 @@ pub const DeclGen = struct { return self.todo("switch on runtime value???", .{}); }; const int_val = switch (cond_ty.zigTypeTag(mod)) { - .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod), + .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod), .Enum => blk: { // TODO: figure out of cond_ty is correct (something with enum literals) break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants @@ -3124,7 +3124,7 @@ pub const DeclGen = struct { else => unreachable, }; const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) { - 1 => .{ .uint32 = @intCast(u32, int_val) }, + 1 => .{ .uint32 = @as(u32, @intCast(int_val)) }, 2 => .{ .uint64 = int_val }, else => unreachable, }; @@ -3139,7 +3139,7 @@ pub const DeclGen = struct { var case_i: u32 = 0; 
while (case_i < num_cases) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -3167,15 +3167,15 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); if (!is_volatile and self.liveness.isUnused(inst)) return null; var extra_i: usize = extra.end; - const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; if (outputs.len > 1) { @@ -3297,7 +3297,7 @@ pub const DeclGen = struct { const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, 
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig index 73a842ebe9ec..8f466668eae7 100644 --- a/src/codegen/spirv/Assembler.zig +++ b/src/codegen/spirv/Assembler.zig @@ -293,7 +293,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits}); }, } - break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } }); + break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @as(u16, @intCast(bits)) } }); }, .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{ .component_type = try self.resolveTypeRef(operands[1].ref_id), @@ -306,7 +306,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { }, .OpTypePointer => try self.spv.ptrType( try self.resolveTypeRef(operands[2].ref_id), - @enumFromInt(spec.StorageClass, operands[1].value), + @as(spec.StorageClass, @enumFromInt(operands[1].value)), ), .OpTypeFunction => blk: { const param_operands = operands[2..]; @@ -340,7 +340,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { else => switch (self.inst.opcode) { .OpEntryPoint => unreachable, .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes, - .OpVariable => switch (@enumFromInt(spec.StorageClass, operands[2].value)) { + .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) { .Function => &self.func.prologue, else => { // This is currently disabled because global variables are required to be @@ -391,7 +391,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { } const actual_word_count = section.instructions.items.len - first_word; - section.instructions.items[first_word] |= @as(u32, @intCast(u16, actual_word_count)) << 16 | @intFromEnum(self.inst.opcode); + section.instructions.items[first_word] |= @as(u32, @as(u16, @intCast(actual_word_count))) << 16 | @intFromEnum(self.inst.opcode); if (maybe_result_id) |result| { return AsmValue{ .value = 
result }; @@ -458,7 +458,7 @@ fn parseInstruction(self: *Assembler) !void { if (!entry.found_existing) { entry.value_ptr.* = .just_declared; } - break :blk @intCast(AsmValue.Ref, entry.index); + break :blk @as(AsmValue.Ref, @intCast(entry.index)); } else null; const opcode_tok = self.currentToken(); @@ -613,7 +613,7 @@ fn parseRefId(self: *Assembler) !void { entry.value_ptr.* = .unresolved_forward_reference; } - const index = @intCast(AsmValue.Ref, entry.index); + const index = @as(AsmValue.Ref, @intCast(entry.index)); try self.inst.operands.append(self.gpa, .{ .ref_id = index }); } @@ -645,7 +645,7 @@ fn parseString(self: *Assembler) !void { else text[1..]; - const string_offset = @intCast(u32, self.inst.string_bytes.items.len); + const string_offset = @as(u32, @intCast(self.inst.string_bytes.items.len)); try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1); self.inst.string_bytes.appendSliceAssumeCapacity(literal); self.inst.string_bytes.appendAssumeCapacity(0); @@ -693,18 +693,18 @@ fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness const int = std.fmt.parseInt(i128, text, 0) catch break :invalid; const min = switch (signedness) { .unsigned => 0, - .signed => -(@as(i128, 1) << (@intCast(u7, width) - 1)), + .signed => -(@as(i128, 1) << (@as(u7, @intCast(width)) - 1)), }; - const max = (@as(i128, 1) << (@intCast(u7, width) - @intFromBool(signedness == .signed))) - 1; + const max = (@as(i128, 1) << (@as(u7, @intCast(width)) - @intFromBool(signedness == .signed))) - 1; if (int < min or int > max) { break :invalid; } // Note, we store the sign-extended version here. 
if (width <= @bitSizeOf(spec.Word)) { - try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(u32, @bitCast(u128, int)) }); + try self.inst.operands.append(self.gpa, .{ .literal32 = @as(u32, @truncate(@as(u128, @bitCast(int)))) }); } else { - try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(u64, @bitCast(u128, int)) }); + try self.inst.operands.append(self.gpa, .{ .literal64 = @as(u64, @truncate(@as(u128, @bitCast(int)))) }); } return; } @@ -725,7 +725,7 @@ fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void { return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width }); }; - const float_bits = @bitCast(Int, value); + const float_bits = @as(Int, @bitCast(value)); if (width <= @bitSizeOf(spec.Word)) { try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits }); } else { diff --git a/src/codegen/spirv/Cache.zig b/src/codegen/spirv/Cache.zig index 7d7fc0fb0d9d..7a3b6f61f5dc 100644 --- a/src/codegen/spirv/Cache.zig +++ b/src/codegen/spirv/Cache.zig @@ -158,16 +158,16 @@ const Tag = enum { high: u32, fn encode(value: f64) Float64 { - const bits = @bitCast(u64, value); + const bits = @as(u64, @bitCast(value)); return .{ - .low = @truncate(u32, bits), - .high = @truncate(u32, bits >> 32), + .low = @as(u32, @truncate(bits)), + .high = @as(u32, @truncate(bits >> 32)), }; } fn decode(self: Float64) f64 { const bits = @as(u64, self.low) | (@as(u64, self.high) << 32); - return @bitCast(f64, bits); + return @as(f64, @bitCast(bits)); } }; @@ -189,8 +189,8 @@ const Tag = enum { fn encode(ty: Ref, value: u64) Int64 { return .{ .ty = ty, - .low = @truncate(u32, value), - .high = @truncate(u32, value >> 32), + .low = @as(u32, @truncate(value)), + .high = @as(u32, @truncate(value >> 32)), }; } @@ -207,13 +207,13 @@ const Tag = enum { fn encode(ty: Ref, value: i64) Int64 { return .{ .ty = ty, - .low = @truncate(u32, @bitCast(u64, value)), - .high = @truncate(u32, @bitCast(u64, value) >> 32), + 
.low = @as(u32, @truncate(@as(u64, @bitCast(value)))), + .high = @as(u32, @truncate(@as(u64, @bitCast(value)) >> 32)), }; } fn decode(self: Int64) i64 { - return @bitCast(i64, @as(u64, self.low) | (@as(u64, self.high) << 32)); + return @as(i64, @bitCast(@as(u64, self.low) | (@as(u64, self.high) << 32))); } }; }; @@ -305,21 +305,21 @@ pub const Key = union(enum) { /// Turns this value into the corresponding 32-bit literal, 2s complement signed. fn toBits32(self: Int) u32 { return switch (self.value) { - .uint64 => |val| @intCast(u32, val), - .int64 => |val| if (val < 0) @bitCast(u32, @intCast(i32, val)) else @intCast(u32, val), + .uint64 => |val| @as(u32, @intCast(val)), + .int64 => |val| if (val < 0) @as(u32, @bitCast(@as(i32, @intCast(val)))) else @as(u32, @intCast(val)), }; } fn toBits64(self: Int) u64 { return switch (self.value) { .uint64 => |val| val, - .int64 => |val| @bitCast(u64, val), + .int64 => |val| @as(u64, @bitCast(val)), }; } fn to(self: Int, comptime T: type) T { return switch (self.value) { - inline else => |val| @intCast(T, val), + inline else => |val| @as(T, @intCast(val)), }; } }; @@ -357,9 +357,9 @@ pub const Key = union(enum) { .float => |float| { std.hash.autoHash(&hasher, float.ty); switch (float.value) { - .float16 => |value| std.hash.autoHash(&hasher, @bitCast(u16, value)), - .float32 => |value| std.hash.autoHash(&hasher, @bitCast(u32, value)), - .float64 => |value| std.hash.autoHash(&hasher, @bitCast(u64, value)), + .float16 => |value| std.hash.autoHash(&hasher, @as(u16, @bitCast(value))), + .float32 => |value| std.hash.autoHash(&hasher, @as(u32, @bitCast(value))), + .float64 => |value| std.hash.autoHash(&hasher, @as(u64, @bitCast(value))), } }, .function_type => |func| { @@ -379,7 +379,7 @@ pub const Key = union(enum) { }, inline else => |key| std.hash.autoHash(&hasher, key), } - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } fn eql(a: Key, b: Key) bool { @@ -411,7 +411,7 @@ pub const Key = 
union(enum) { pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool { _ = b_void; - return ctx.self.lookup(@enumFromInt(Ref, b_index)).eql(a); + return ctx.self.lookup(@as(Ref, @enumFromInt(b_index))).eql(a); } pub fn hash(ctx: @This(), a: Key) u32 { @@ -445,7 +445,7 @@ pub fn materialize(self: *const Self, spv: *Module) !Section { var section = Section{}; errdefer section.deinit(spv.gpa); for (self.items.items(.result_id), 0..) |result_id, index| { - try self.emit(spv, result_id, @enumFromInt(Ref, index), §ion); + try self.emit(spv, result_id, @as(Ref, @enumFromInt(index)), §ion); } return section; } @@ -534,7 +534,7 @@ fn emit( } for (struct_type.memberNames(), 0..) |member_name, i| { if (self.getString(member_name)) |name| { - try spv.memberDebugName(result_id, @intCast(u32, i), "{s}", .{name}); + try spv.memberDebugName(result_id, @as(u32, @intCast(i)), "{s}", .{name}); } } // TODO: Decorations? @@ -557,7 +557,7 @@ fn emit( .float => |float| { const ty_id = self.resultId(float.ty); const lit: Lit = switch (float.value) { - .float16 => |value| .{ .uint32 = @bitCast(u16, value) }, + .float16 => |value| .{ .uint32 = @as(u16, @bitCast(value)) }, .float32 => |value| .{ .float32 = value }, .float64 => |value| .{ .float64 = value }, }; @@ -603,7 +603,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { const adapter: Key.Adapter = .{ .self = self }; const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter); if (entry.found_existing) { - return @enumFromInt(Ref, entry.index); + return @as(Ref, @enumFromInt(entry.index)); } const result_id = spv.allocId(); const item: Item = switch (key) { @@ -640,10 +640,10 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { }, .function_type => |function| blk: { const extra = try self.addExtra(spv, Tag.FunctionType{ - .param_len = @intCast(u32, function.parameters.len), + .param_len = @as(u32, @intCast(function.parameters.len)), .return_type = function.return_type, }); - try 
self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, function.parameters)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(function.parameters))); break :blk .{ .tag = .type_function, .result_id = result_id, @@ -678,12 +678,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .struct_type => |struct_type| blk: { const extra = try self.addExtra(spv, Tag.SimpleStructType{ .name = struct_type.name, - .members_len = @intCast(u32, struct_type.member_types.len), + .members_len = @as(u32, @intCast(struct_type.member_types.len)), }); - try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, struct_type.member_types)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(struct_type.member_types))); if (struct_type.member_names) |member_names| { - try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, member_names)); + try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(member_names))); break :blk Item{ .tag = .type_struct_simple_with_member_names, .result_id = result_id, @@ -721,7 +721,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .result_id = result_id, .data = try self.addExtra(spv, Tag.UInt32{ .ty = int.ty, - .value = @intCast(u32, val), + .value = @as(u32, @intCast(val)), }), }; } else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) { @@ -730,20 +730,20 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { .result_id = result_id, .data = try self.addExtra(spv, Tag.Int32{ .ty = int.ty, - .value = @intCast(i32, val), + .value = @as(i32, @intCast(val)), }), }; } else if (val < 0) { break :blk .{ .tag = .int_large, .result_id = result_id, - .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(i64, val))), + .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @as(i64, @intCast(val)))), }; } else { break :blk .{ .tag = .uint_large, .result_id = result_id, - .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(u64, val))), + .data = try 
self.addExtra(spv, Tag.UInt64.encode(int.ty, @as(u64, @intCast(val)))), }; } }, @@ -753,12 +753,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { 16 => .{ .tag = .float16, .result_id = result_id, - .data = @bitCast(u16, float.value.float16), + .data = @as(u16, @bitCast(float.value.float16)), }, 32 => .{ .tag = .float32, .result_id = result_id, - .data = @bitCast(u32, float.value.float32), + .data = @as(u32, @bitCast(float.value.float32)), }, 64 => .{ .tag = .float64, @@ -788,7 +788,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { }; try self.items.append(spv.gpa, item); - return @enumFromInt(Ref, entry.index); + return @as(Ref, @enumFromInt(entry.index)); } /// Turn a Ref back into a Key. @@ -797,20 +797,20 @@ pub fn lookup(self: *const Self, ref: Ref) Key { const item = self.items.get(@intFromEnum(ref)); const data = item.data; return switch (item.tag) { - .type_simple => switch (@enumFromInt(Tag.SimpleType, data)) { + .type_simple => switch (@as(Tag.SimpleType, @enumFromInt(data))) { .void => .void_type, .bool => .bool_type, }, .type_int_signed => .{ .int_type = .{ .signedness = .signed, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_int_unsigned => .{ .int_type = .{ .signedness = .unsigned, - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_float => .{ .float_type = .{ - .bits = @intCast(u16, data), + .bits = @as(u16, @intCast(data)), } }, .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) }, .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) }, @@ -819,26 +819,26 @@ pub fn lookup(self: *const Self, ref: Ref) Key { return .{ .function_type = .{ .return_type = payload.data.return_type, - .parameters = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.param_len]), + .parameters = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len])), }, }; }, .type_ptr_generic => .{ .ptr_type = .{ 
.storage_class = .Generic, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_crosswgp => .{ .ptr_type = .{ .storage_class = .CrossWorkgroup, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_function => .{ .ptr_type = .{ .storage_class = .Function, - .child_type = @enumFromInt(Ref, data), + .child_type = @as(Ref, @enumFromInt(data)), }, }, .type_ptr_simple => { @@ -852,7 +852,7 @@ pub fn lookup(self: *const Self, ref: Ref) Key { }, .type_struct_simple => { const payload = self.extraDataTrail(Tag.SimpleStructType, data); - const member_types = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.members_len]); + const member_types = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len])); return .{ .struct_type = .{ .name = payload.data.name, @@ -864,8 +864,8 @@ pub fn lookup(self: *const Self, ref: Ref) Key { .type_struct_simple_with_member_names => { const payload = self.extraDataTrail(Tag.SimpleStructType, data); const trailing = self.extra.items[payload.trail..]; - const member_types = @ptrCast([]const Ref, trailing[0..payload.data.members_len]); - const member_names = @ptrCast([]const String, trailing[payload.data.members_len..][0..payload.data.members_len]); + const member_types = @as([]const Ref, @ptrCast(trailing[0..payload.data.members_len])); + const member_names = @as([]const String, @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len])); return .{ .struct_type = .{ .name = payload.data.name, @@ -876,11 +876,11 @@ pub fn lookup(self: *const Self, ref: Ref) Key { }, .float16 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 16 } }), - .value = .{ .float16 = @bitCast(f16, @intCast(u16, data)) }, + .value = .{ .float16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) }, } }, .float32 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 32 } }), - .value = .{ 
.float32 = @bitCast(f32, data) }, + .value = .{ .float32 = @as(f32, @bitCast(data)) }, } }, .float64 => .{ .float = .{ .ty = self.get(.{ .float_type = .{ .bits = 64 } }), @@ -923,17 +923,17 @@ pub fn lookup(self: *const Self, ref: Ref) Key { } }; }, .undef => .{ .undef = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), } }, .null => .{ .null = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), } }, .bool_true => .{ .bool = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), .value = true, } }, .bool_false => .{ .bool = .{ - .ty = @enumFromInt(Ref, data), + .ty = @as(Ref, @enumFromInt(data)), .value = false, } }, }; @@ -949,7 +949,7 @@ pub fn resultId(self: Self, ref: Ref) IdResult { fn get(self: *const Self, key: Key) Ref { const adapter: Key.Adapter = .{ .self = self }; const index = self.map.getIndexAdapted(key, adapter).?; - return @enumFromInt(Ref, index); + return @as(Ref, @enumFromInt(index)); } fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 { @@ -959,12 +959,12 @@ fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 { } fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 { - const payload_offset = @intCast(u32, self.extra.items.len); + const payload_offset = @as(u32, @intCast(self.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { const field_val = @field(extra, field.name); const word = switch (field.type) { u32 => field_val, - i32 => @bitCast(u32, field_val), + i32 => @as(u32, @bitCast(field_val)), Ref => @intFromEnum(field_val), StorageClass => @intFromEnum(field_val), String => @intFromEnum(field_val), @@ -986,16 +986,16 @@ fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, t const word = self.extra.items[offset + i]; @field(result, field.name) = switch (field.type) { u32 => word, - i32 => @bitCast(i32, word), - Ref => @enumFromInt(Ref, word), - StorageClass => @enumFromInt(StorageClass, word), - 
String => @enumFromInt(String, word), + i32 => @as(i32, @bitCast(word)), + Ref => @as(Ref, @enumFromInt(word)), + StorageClass => @as(StorageClass, @enumFromInt(word)), + String => @as(String, @enumFromInt(word)), else => @compileError("Invalid type: " ++ @typeName(field.type)), }; } return .{ .data = result, - .trail = offset + @intCast(u32, fields.len), + .trail = offset + @as(u32, @intCast(fields.len)), }; } @@ -1017,7 +1017,7 @@ pub const String = enum(u32) { _ = ctx; var hasher = std.hash.Wyhash.init(0); hasher.update(a); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } }; }; @@ -1032,10 +1032,10 @@ pub fn addString(self: *Self, spv: *Module, str: []const u8) !String { try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len); self.string_bytes.appendSliceAssumeCapacity(str); self.string_bytes.appendAssumeCapacity(0); - entry.value_ptr.* = @intCast(u32, offset); + entry.value_ptr.* = @as(u32, @intCast(offset)); } - return @enumFromInt(String, entry.index); + return @as(String, @enumFromInt(entry.index)); } pub fn getString(self: *const Self, ref: String) ?[]const u8 { diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 9d8cca9445ba..e61ac754eee2 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -451,8 +451,8 @@ pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef { return try self.resolveId(.{ .int = .{ .ty = ty_ref, .value = switch (ty.signedness) { - .signed => Value{ .int64 = @intCast(i64, value) }, - .unsigned => Value{ .uint64 = @intCast(u64, value) }, + .signed => Value{ .int64 = @as(i64, @intCast(value)) }, + .unsigned => Value{ .uint64 = @as(u64, @intCast(value)) }, }, } }); } @@ -516,7 +516,7 @@ pub fn allocDecl(self: *Module, kind: DeclKind) !Decl.Index { .begin_dep = undefined, .end_dep = undefined, }); - const index = @enumFromInt(Decl.Index, @intCast(u32, self.decls.items.len - 1)); + const index = @as(Decl.Index, 
@enumFromInt(@as(u32, @intCast(self.decls.items.len - 1)))); switch (kind) { .func => {}, // If the decl represents a global, also allocate a global node. @@ -540,9 +540,9 @@ pub fn globalPtr(self: *Module, index: Decl.Index) ?*Global { /// Declare ALL dependencies for a decl. pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void { - const begin_dep = @intCast(u32, self.decl_deps.items.len); + const begin_dep = @as(u32, @intCast(self.decl_deps.items.len)); try self.decl_deps.appendSlice(self.gpa, deps); - const end_dep = @intCast(u32, self.decl_deps.items.len); + const end_dep = @as(u32, @intCast(self.decl_deps.items.len)); const decl = self.declPtr(decl_index); decl.begin_dep = begin_dep; @@ -550,13 +550,13 @@ pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl } pub fn beginGlobal(self: *Module) u32 { - return @intCast(u32, self.globals.section.instructions.items.len); + return @as(u32, @intCast(self.globals.section.instructions.items.len)); } pub fn endGlobal(self: *Module, global_index: Decl.Index, begin_inst: u32) void { const global = self.globalPtr(global_index).?; global.begin_inst = begin_inst; - global.end_inst = @intCast(u32, self.globals.section.instructions.items.len); + global.end_inst = @as(u32, @intCast(self.globals.section.instructions.items.len)); } pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8) !void { diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig index b35dc489e420..ae88dc7c8a94 100644 --- a/src/codegen/spirv/Section.zig +++ b/src/codegen/spirv/Section.zig @@ -50,7 +50,7 @@ pub fn emitRaw( ) !void { const word_count = 1 + operand_words; try section.instructions.ensureUnusedCapacity(allocator, word_count); - section.writeWord((@intCast(Word, word_count << 16)) | @intFromEnum(opcode)); + section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode)); } pub fn emit( @@ -61,7 +61,7 @@ pub fn emit( ) 
!void { const word_count = instructionSize(opcode, operands); try section.instructions.ensureUnusedCapacity(allocator, word_count); - section.writeWord(@intCast(Word, word_count << 16) | @intFromEnum(opcode)); + section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode)); section.writeOperands(opcode.Operands(), operands); } @@ -94,8 +94,8 @@ pub fn writeWords(section: *Section, words: []const Word) void { pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void { section.writeWords(&.{ - @truncate(Word, dword), - @truncate(Word, dword >> @bitSizeOf(Word)), + @as(Word, @truncate(dword)), + @as(Word, @truncate(dword >> @bitSizeOf(Word))), }); } @@ -145,7 +145,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) }, .Struct => |info| { if (info.layout == .Packed) { - section.writeWord(@bitCast(Word, operand)); + section.writeWord(@as(Word, @bitCast(operand))); } else { section.writeExtendedMask(Operand, operand); } @@ -166,7 +166,7 @@ fn writeString(section: *Section, str: []const u8) void { var j: usize = 0; while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) { - word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * @bitSizeOf(u8)); + word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8))); } section.instructions.appendAssumeCapacity(word); @@ -175,12 +175,12 @@ fn writeString(section: *Section, str: []const u8) void { fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void { switch (operand) { - .int32 => |int| section.writeWord(@bitCast(Word, int)), - .uint32 => |int| section.writeWord(@bitCast(Word, int)), - .int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)), - .uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)), - .float32 => |float| section.writeWord(@bitCast(Word, float)), - .float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)), + .int32 => |int| section.writeWord(@as(Word, 
@bitCast(int))), + .uint32 => |int| section.writeWord(@as(Word, @bitCast(int))), + .int64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))), + .uint64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))), + .float32 => |float| section.writeWord(@as(Word, @bitCast(float))), + .float64 => |float| section.writeDoubleWord(@as(DoubleWord, @bitCast(float))), } } @@ -189,10 +189,10 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| { switch (@typeInfo(field.type)) { .Optional => if (@field(operand, field.name) != null) { - mask |= 1 << @intCast(u5, bit); + mask |= 1 << @as(u5, @intCast(bit)); }, .Bool => if (@field(operand, field.name)) { - mask |= 1 << @intCast(u5, bit); + mask |= 1 << @as(u5, @intCast(bit)); }, else => unreachable, } @@ -392,7 +392,7 @@ test "SPIR-V Section emit() - extended mask" { (@as(Word, 5) << 16) | @intFromEnum(Opcode.OpLoopMerge), 10, 20, - @bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }), + @as(Word, @bitCast(spec.LoopControl{ .Unroll = true, .DependencyLength = true })), 2, }, section.instructions.items); } diff --git a/src/crash_report.zig b/src/crash_report.zig index cb468c101f82..fc41528321c7 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -204,49 +204,49 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any const stack_ctx: StackContext = switch (builtin.cpu.arch) { .x86 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]); - const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP])); + const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP])); break :ctx StackContext{ .exception = .{ .bp = bp, .ip = 
ip } }; }, .x86_64 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (builtin.os.tag) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]), - .freebsd => @intCast(usize, ctx.mcontext.rip), - .openbsd => @intCast(usize, ctx.sc_rip), - .macos => @intCast(usize, ctx.mcontext.ss.rip), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.rip)), + .openbsd => @as(usize, @intCast(ctx.sc_rip)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)), else => unreachable, }; const bp = switch (builtin.os.tag) { - .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]), - .openbsd => @intCast(usize, ctx.sc_rbp), - .freebsd => @intCast(usize, ctx.mcontext.rbp), - .macos => @intCast(usize, ctx.mcontext.ss.rbp), + .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])), + .openbsd => @as(usize, @intCast(ctx.sc_rbp)), + .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)), + .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)), else => unreachable, }; break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, .arm => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); - const ip = @intCast(usize, ctx.mcontext.arm_pc); - const bp = @intCast(usize, ctx.mcontext.arm_fp); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); + const ip = @as(usize, @intCast(ctx.mcontext.arm_pc)); + const bp = @as(usize, @intCast(ctx.mcontext.arm_fp)); break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, .aarch64 => ctx: { - const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); + const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); const ip = switch (native_os) { - .macos => @intCast(usize, 
ctx.mcontext.ss.pc), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr), - else => @intCast(usize, ctx.mcontext.pc), + .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)), + else => @as(usize, @intCast(ctx.mcontext.pc)), }; // x29 is the ABI-designated frame pointer const bp = switch (native_os) { - .macos => @intCast(usize, ctx.mcontext.ss.fp), - .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]), - .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]), - else => @intCast(usize, ctx.mcontext.regs[29]), + .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)), + .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])), + .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])), + else => @as(usize, @intCast(ctx.mcontext.regs[29])), }; break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } }; }, diff --git a/src/glibc.zig b/src/glibc.zig index bb38c2c987d4..cf12e8ea46bf 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -779,13 +779,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo // Test whether the inclusion applies to our current library and target. 
const ok_lib_and_target = (lib_index == lib_i) and - ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0); + ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0); while (true) { const byte = metadata.inclusions[inc_i]; inc_i += 1; const last = (byte & 0b1000_0000) != 0; - const ver_i = @truncate(u7, byte); + const ver_i = @as(u7, @truncate(byte)); if (ok_lib_and_target and ver_i <= target_ver_index) { versions_buffer[versions_len] = ver_i; versions_len += 1; @@ -913,13 +913,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo // Test whether the inclusion applies to our current library and target. const ok_lib_and_target = (lib_index == lib_i) and - ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0); + ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0); while (true) { const byte = metadata.inclusions[inc_i]; inc_i += 1; const last = (byte & 0b1000_0000) != 0; - const ver_i = @truncate(u7, byte); + const ver_i = @as(u7, @truncate(byte)); if (ok_lib_and_target and ver_i <= target_ver_index) { versions_buffer[versions_len] = ver_i; versions_len += 1; diff --git a/src/link/C.zig b/src/link/C.zig index 9a42daa0610f..e3f8653852a8 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -292,7 +292,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo { var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); - try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len)); + try export_names.ensureTotalCapacity(gpa, @as(u32, @intCast(module.decl_exports.entries.len))); for (module.decl_exports.values()) |exports| for (exports.items) |@"export"| try export_names.put(gpa, @"export".opts.name, {}); @@ -426,7 +426,7 @@ fn flushCTypes( return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count]; } }; - const decl_idx = @intCast(codegen.CType.Index, 
codegen.CType.Tag.no_payload_count + decl_i); + const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i)); const ctx = Context{ .arena = global_ctypes.arena.allocator(), .ctypes_map = f.ctypes_map.items, @@ -437,7 +437,7 @@ fn flushCTypes( .store = &global_ctypes.set, }); const global_idx = - @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index); + @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index)); f.ctypes_map.appendAssumeCapacity(global_idx); if (!gop.found_existing) { errdefer _ = global_ctypes.set.map.pop(); @@ -538,7 +538,7 @@ fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) Flush fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void { const gpa = self.base.allocator; - try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count())); + try f.lazy_fns.ensureUnusedCapacity(gpa, @as(Flush.LazyFns.Size, @intCast(lazy_fns.count()))); var it = lazy_fns.iterator(); while (it.next()) |entry| { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index e3fcc941eb46..a724d4023aa8 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -358,7 +358,7 @@ fn populateMissingMetadata(self: *Coff) !void { }); if (self.text_section_index == null) { - const file_size = @intCast(u32, self.base.options.program_code_size_hint); + const file_size = @as(u32, @intCast(self.base.options.program_code_size_hint)); self.text_section_index = try self.allocateSection(".text", file_size, .{ .CNT_CODE = 1, .MEM_EXECUTE = 1, @@ -367,7 +367,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.got_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size(); + const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size(); self.got_section_index = try self.allocateSection(".got", file_size, .{ .CNT_INITIALIZED_DATA = 
1, .MEM_READ = 1, @@ -392,7 +392,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.idata_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size(); + const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size(); self.idata_section_index = try self.allocateSection(".idata", file_size, .{ .CNT_INITIALIZED_DATA = 1, .MEM_READ = 1, @@ -400,7 +400,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.reloc_section_index == null) { - const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation); + const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * @sizeOf(coff.BaseRelocation); self.reloc_section_index = try self.allocateSection(".reloc", file_size, .{ .CNT_INITIALIZED_DATA = 1, .MEM_DISCARDABLE = 1, @@ -409,7 +409,7 @@ fn populateMissingMetadata(self: *Coff) !void { } if (self.strtab_offset == null) { - const file_size = @intCast(u32, self.strtab.len()); + const file_size = @as(u32, @intCast(self.strtab.len())); self.strtab_offset = self.findFreeSpace(file_size, @alignOf(u32)); // 4bytes aligned seems like a good idea here log.debug("found strtab free space 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + file_size }); } @@ -430,7 +430,7 @@ fn populateMissingMetadata(self: *Coff) !void { } fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.SectionHeaderFlags) !u16 { - const index = @intCast(u16, self.sections.slice().len); + const index = @as(u16, @intCast(self.sections.slice().len)); const off = self.findFreeSpace(size, default_file_alignment); // Memory is always allocated in sequence // TODO: investigate if we can allocate .text last; this way it would never need to grow in memory! 
@@ -652,7 +652,7 @@ pub fn allocateSymbol(self: *Coff) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); break :blk index; } @@ -680,7 +680,7 @@ fn allocateGlobal(self: *Coff) !u32 { break :blk index; } else { log.debug(" (allocating global index {d})", .{self.globals.items.len}); - const index = @intCast(u32, self.globals.items.len); + const index = @as(u32, @intCast(self.globals.items.len)); _ = self.globals.addOneAssumeCapacity(); break :blk index; } @@ -704,7 +704,7 @@ fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void { pub fn createAtom(self: *Coff) !Atom.Index { const gpa = self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const sym_index = try self.allocateSymbol(); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index); @@ -776,7 +776,7 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void { self.resolveRelocs(atom_index, relocs.items, mem_code, slide); const vaddr = sym.value + slide; - const pvaddr = @ptrFromInt(*anyopaque, vaddr); + const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr)); log.debug("writing to memory at address {x}", .{vaddr}); @@ -830,7 +830,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { const sect_id = self.got_section_index.?; if (self.got_table_count_dirty) { - const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size()); + const needed_size = @as(u32, @intCast(self.got_table.entries.items.len * self.ptr_width.size())); try self.growSection(sect_id, needed_size); self.got_table_count_dirty = false; } @@ -847,7 +847,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { switch (self.ptr_width) { 
.p32 => { var buf: [4]u8 = undefined; - mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase())); + mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + self.getImageBase()))); try self.base.file.?.pwriteAll(&buf, file_offset); }, .p64 => { @@ -862,7 +862,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { const gpa = self.base.allocator; const slide = @intFromPtr(self.hot_state.loaded_base_address.?); const actual_vmaddr = vmaddr + slide; - const pvaddr = @ptrFromInt(*anyopaque, actual_vmaddr); + const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr)); log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr}); if (build_options.enable_logging) { switch (self.ptr_width) { @@ -880,7 +880,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { switch (self.ptr_width) { .p32 => { var buf: [4]u8 = undefined; - mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide)); + mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + slide))); writeMem(handle, pvaddr, &buf) catch |err| { log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)}); }; @@ -1107,7 +1107,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const atom = self.getAtom(atom_index); const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, sym_name); - sym.section_number = @enumFromInt(coff.SectionNumber, self.rdata_section_index.? + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.rdata_section_index.? 
+ 1)); } const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{ @@ -1125,7 +1125,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const required_alignment = tv.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); errdefer self.freeAtom(atom_index); @@ -1241,10 +1241,10 @@ fn updateLazySymbolAtom( }, }; - const code_len = @intCast(u32, code.len); + const code_len = @as(u32, @intCast(code.len)); const symbol = atom.getSymbolPtr(self); try self.setSymbolName(symbol, name); - symbol.section_number = @enumFromInt(coff.SectionNumber, section_index + 1); + symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1)); symbol.type = .{ .complex_type = .NULL, .base_type = .NULL }; const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); @@ -1336,12 +1336,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple const atom = self.getAtom(atom_index); const sym_index = atom.getSymbolIndex().?; const sect_index = decl_metadata.section; - const code_len = @intCast(u32, code.len); + const code_len = @as(u32, @intCast(code.len)); if (atom.size != 0) { const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, decl_name); - sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const capacity = atom.capacity(self); @@ -1365,7 +1365,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple } else { const sym = atom.getSymbolPtr(self); try self.setSymbolName(sym, decl_name); - sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1); + 
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); @@ -1502,7 +1502,7 @@ pub fn updateDeclExports( const sym = self.getSymbolPtr(sym_loc); try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name)); sym.value = decl_sym.value; - sym.section_number = @enumFromInt(coff.SectionNumber, self.text_section_index.? + 1); + sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.text_section_index.? + 1)); sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL }; switch (exp.opts.linkage) { @@ -1728,12 +1728,12 @@ pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link try Atom.addRelocation(self, atom_index, .{ .type = .direct, .target = target, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .addend = reloc_info.addend, .pcrel = false, .length = 3, }); - try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset)); + try Atom.addBaseRelocation(self, atom_index, @as(u32, @intCast(reloc_info.offset))); return 0; } @@ -1804,7 +1804,7 @@ fn writeBaseRelocations(self: *Coff) !void { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); } try gop.value_ptr.append(.{ - .offset = @intCast(u12, rva - page), + .offset = @as(u12, @intCast(rva - page)), .type = .DIR64, }); } @@ -1818,14 +1818,14 @@ fn writeBaseRelocations(self: *Coff) !void { const sym = self.getSymbol(entry); if (sym.section_number == .UNDEFINED) continue; - const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); + const rva = @as(u32, @intCast(header.virtual_address + index * self.ptr_width.size())); const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); } try 
gop.value_ptr.append(.{ - .offset = @intCast(u12, rva - page), + .offset = @as(u12, @intCast(rva - page)), .type = .DIR64, }); } @@ -1860,9 +1860,9 @@ fn writeBaseRelocations(self: *Coff) !void { }); } - const block_size = @intCast( + const block_size = @as( u32, - entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry), + @intCast(entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry)), ); try buffer.ensureUnusedCapacity(block_size); buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{ @@ -1873,7 +1873,7 @@ fn writeBaseRelocations(self: *Coff) !void { } const header = &self.sections.items(.header)[self.reloc_section_index.?]; - const needed_size = @intCast(u32, buffer.items.len); + const needed_size = @as(u32, @intCast(buffer.items.len)); try self.growSection(self.reloc_section_index.?, needed_size); try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data); @@ -1904,12 +1904,12 @@ fn writeImportTables(self: *Coff) !void { const itable = self.import_tables.values()[i]; iat_size += itable.size() + 8; dir_table_size += @sizeOf(coff.ImportDirectoryEntry); - lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); + lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff.ImportLookupEntry64.ByName); for (itable.entries.items) |entry| { const sym_name = self.getSymbolName(entry); - names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2); + names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2); } - dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); + dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1)); } const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size; @@ -1948,7 +1948,7 @@ fn writeImportTables(self: *Coff) !void { const import_name = 
self.getSymbolName(entry); // IAT and lookup table entry - const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) }; + const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) }; @memcpy( buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)], mem.asBytes(&lookup), @@ -1964,7 +1964,7 @@ fn writeImportTables(self: *Coff) !void { mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs names_table_offset += 2; @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name); - names_table_offset += @intCast(u32, import_name.len); + names_table_offset += @as(u32, @intCast(import_name.len)); buffer.items[names_table_offset] = 0; names_table_offset += 1; if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) { @@ -1986,9 +1986,9 @@ fn writeImportTables(self: *Coff) !void { // DLL name @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name); - dll_names_offset += @intCast(u32, lib_name.len); + dll_names_offset += @as(u32, @intCast(lib_name.len)); @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext); - dll_names_offset += @intCast(u32, ext.len); + dll_names_offset += @as(u32, @intCast(ext.len)); buffer.items[dll_names_offset] = 0; dll_names_offset += 1; } @@ -2027,11 +2027,11 @@ fn writeStrtab(self: *Coff) !void { if (self.strtab_offset == null) return; const allocated_size = self.allocatedSize(self.strtab_offset.?); - const needed_size = @intCast(u32, self.strtab.len()); + const needed_size = @as(u32, @intCast(self.strtab.len())); if (needed_size > allocated_size) { self.strtab_offset = null; - self.strtab_offset = @intCast(u32, self.findFreeSpace(needed_size, @alignOf(u32))); + self.strtab_offset = @as(u32, @intCast(self.findFreeSpace(needed_size, @alignOf(u32)))); } log.debug("writing strtab from 0x{x} to 
0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + needed_size }); @@ -2042,7 +2042,7 @@ fn writeStrtab(self: *Coff) !void { buffer.appendSliceAssumeCapacity(self.strtab.items()); // Here, we do a trick in that we do not commit the size of the strtab to strtab buffer, instead // we write the length of the strtab to a temporary buffer that goes to file. - mem.writeIntLittle(u32, buffer.items[0..4], @intCast(u32, self.strtab.len())); + mem.writeIntLittle(u32, buffer.items[0..4], @as(u32, @intCast(self.strtab.len()))); try self.base.file.?.pwriteAll(buffer.items, self.strtab_offset.?); } @@ -2081,11 +2081,11 @@ fn writeHeader(self: *Coff) !void { } const timestamp = std.time.timestamp(); - const size_of_optional_header = @intCast(u16, self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()); + const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize())); var coff_header = coff.CoffHeader{ .machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch), - .number_of_sections = @intCast(u16, self.sections.slice().len), // TODO what if we prune a section - .time_date_stamp = @truncate(u32, @bitCast(u64, timestamp)), + .number_of_sections = @as(u16, @intCast(self.sections.slice().len)), // TODO what if we prune a section + .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))), .pointer_to_symbol_table = self.strtab_offset orelse 0, .number_of_symbols = 0, .size_of_optional_header = size_of_optional_header, @@ -2135,7 +2135,7 @@ fn writeHeader(self: *Coff) !void { .address_of_entry_point = self.entry_addr orelse 0, .base_of_code = base_of_code, .base_of_data = base_of_data, - .image_base = @intCast(u32, image_base), + .image_base = @as(u32, @intCast(image_base)), .section_alignment = self.page_size, .file_alignment = default_file_alignment, .major_operating_system_version = 6, @@ -2155,7 +2155,7 @@ fn writeHeader(self: *Coff) !void { .size_of_heap_reserve = 
default_size_of_heap_reserve, .size_of_heap_commit = default_size_of_heap_commit, .loader_flags = 0, - .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len), + .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)), }; writer.writeAll(mem.asBytes(&opt_header)) catch unreachable; }, @@ -2189,7 +2189,7 @@ fn writeHeader(self: *Coff) !void { .size_of_heap_reserve = default_size_of_heap_reserve, .size_of_heap_commit = default_size_of_heap_commit, .loader_flags = 0, - .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len), + .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)), }; writer.writeAll(mem.asBytes(&opt_header)) catch unreachable; }, @@ -2210,7 +2210,7 @@ fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 { const end = start + padToIdeal(size); if (self.strtab_offset) |off| { - const tight_size = @intCast(u32, self.strtab.len()); + const tight_size = @as(u32, @intCast(self.strtab.len())); const increased_size = padToIdeal(tight_size); const test_end = off + increased_size; if (end > off and start < test_end) { @@ -2265,28 +2265,28 @@ fn allocatedVirtualSize(self: *Coff, start: u32) u32 { inline fn getSizeOfHeaders(self: Coff) u32 { const msdos_hdr_size = msdos_stub.len + 4; - return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() + - self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize()); + return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() + + self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize())); } inline fn getOptionalHeaderSize(self: Coff) u32 { return switch (self.ptr_width) { - .p32 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE32)), - .p64 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE64)), + .p32 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE32))), + .p64 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE64))), }; } inline fn getDataDirectoryHeadersSize(self: 
Coff) u32 { - return @intCast(u32, self.data_directories.len * @sizeOf(coff.ImageDataDirectory)); + return @as(u32, @intCast(self.data_directories.len * @sizeOf(coff.ImageDataDirectory))); } inline fn getSectionHeadersSize(self: Coff) u32 { - return @intCast(u32, self.sections.slice().len * @sizeOf(coff.SectionHeader)); + return @as(u32, @intCast(self.sections.slice().len * @sizeOf(coff.SectionHeader))); } inline fn getDataDirectoryHeadersOffset(self: Coff) u32 { const msdos_hdr_size = msdos_stub.len + 4; - return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize()); + return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize())); } inline fn getSectionHeadersOffset(self: Coff) u32 { @@ -2473,7 +2473,7 @@ fn logSymtab(self: *Coff) void { }; log.debug(" %{d}: {?s} @{x} in {s}({d}), {s}", .{ sym_id, - self.getSymbolName(.{ .sym_index = @intCast(u32, sym_id), .file = null }), + self.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }), sym.value, where, def_index, diff --git a/src/link/Coff/ImportTable.zig b/src/link/Coff/ImportTable.zig index c3ba77e85518..c25851fe72c3 100644 --- a/src/link/Coff/ImportTable.zig +++ b/src/link/Coff/ImportTable.zig @@ -38,7 +38,7 @@ pub fn deinit(itab: *ImportTable, allocator: Allocator) void { /// Size of the import table does not include the sentinel. 
pub fn size(itab: ImportTable) u32 { - return @intCast(u32, itab.entries.items.len) * @sizeOf(u64); + return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64); } pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex { @@ -49,7 +49,7 @@ pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc break :blk index; } else { log.debug(" (allocating import entry at index {d})", .{itab.entries.items.len}); - const index = @intCast(u32, itab.entries.items.len); + const index = @as(u32, @intCast(itab.entries.items.len)); _ = itab.entries.addOneAssumeCapacity(); break :blk index; } @@ -73,7 +73,7 @@ fn getBaseAddress(ctx: Context) u32 { var addr = header.virtual_address; for (ctx.coff_file.import_tables.values(), 0..) |other_itab, i| { if (ctx.index == i) break; - addr += @intCast(u32, other_itab.entries.items.len * @sizeOf(u64)) + 8; + addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8; } return addr; } diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig index 10d4eed92b8b..ded74836671f 100644 --- a/src/link/Coff/Relocation.zig +++ b/src/link/Coff/Relocation.zig @@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void { var buffer = ctx.code[self.offset..]; switch (self.type) { .got_page, .import_page, .page => { - const source_page = @intCast(i32, ctx.source_vaddr >> 12); - const target_page = @intCast(i32, ctx.target_vaddr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - source_page)); + const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12)); + const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12)); + const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page)))); var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( aarch64.Instruction, aarch64.Instruction.pc_relative_address, ), buffer[0..4]), }; - inst.pc_relative_address.immhi = @truncate(u19, pages 
>> 2); - inst.pc_relative_address.immlo = @truncate(u2, pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .got_pageoff, .import_pageoff, .pageoff => { assert(!self.pcrel); - const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr)); + const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr)))); if (isArithmeticOp(buffer[0..4])) { var inst = aarch64.Instruction{ .add_subtract_immediate = mem.bytesToValue(meta.TagPayload( @@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void { 2 => mem.writeIntLittle( u32, buffer[0..4], - @truncate(u32, ctx.target_vaddr + ctx.image_base), + @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)), ), 3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base), else => unreachable, @@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void { .got, .import => { assert(self.pcrel); - const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4; + const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4; mem.writeIntLittle(i32, buffer[0..4], disp); }, .direct => { if (self.pcrel) { - const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4; + const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4; mem.writeIntLittle(i32, buffer[0..4], disp); } else switch (ctx.ptr_width) { - .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)), + .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))), .p64 => switch (self.length) { - 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)), + 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))), 3 => 
mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base), else => unreachable, }, @@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void { } inline fn isArithmeticOp(inst: *const [4]u8) bool { - const group_decode = @truncate(u5, inst[3]); + const group_decode = @as(u5, @truncate(inst[3])); return ((group_decode >> 2) == 4); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 14be46b62174..499855b330ae 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -138,7 +138,7 @@ pub const DeclState = struct { /// which we use as our target of the relocation. fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void { const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: { - const sym_index = @intCast(u32, self.abbrev_table.items.len); + const sym_index = @as(u32, @intCast(self.abbrev_table.items.len)); try self.abbrev_table.append(self.gpa, .{ .atom_index = atom_index, .type = ty, @@ -225,7 +225,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.bool, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); dbg_info_buffer.appendAssumeCapacity(0); @@ -237,7 +237,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const offset = abi_size - payload_ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), offset); @@ -249,7 +249,7 @@ pub const DeclState = struct { if (ty.isSlice(mod)) { // Slices are structs: struct { 
.ptr = *, .len = N } const ptr_bits = target.ptrBitWidth(); - const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8)); + const ptr_bytes = @as(u8, @intCast(@divExact(ptr_bits, 8))); // DW.AT.structure_type try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_type)); @@ -267,7 +267,7 @@ pub const DeclState = struct { var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); const ptr_ty = ty.slicePtrFieldType(mod); - try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ptr_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); dbg_info_buffer.appendAssumeCapacity(0); @@ -279,7 +279,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(ptr_bytes); @@ -291,7 +291,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index))); } }, .Array => { @@ -302,13 +302,13 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index))); // DW.AT.subrange_type try dbg_info_buffer.append(@intFromEnum(AbbrevKind.array_dim)); 
// DW.AT.type, DW.FORM.ref4 index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index))); // DW.AT.count, DW.FORM.udata const len = ty.arrayLenIncludingSentinel(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), len); @@ -334,7 +334,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); @@ -367,7 +367,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); @@ -404,7 +404,7 @@ pub const DeclState = struct { // TODO do not assume a 64bit enum value - could be bigger. 
// See https://github.com/ziglang/zig/issues/645 const field_int_val = try value.toValue().intFromEnum(ty, mod); - break :value @bitCast(u64, field_int_val.toSignedInt(mod)); + break :value @as(u64, @bitCast(field_int_val.toSignedInt(mod))); }; mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -439,7 +439,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const inner_union_index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(inner_union_index + 4); - try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5); + try self.addTypeRelocLocal(atom_index, @as(u32, @intCast(inner_union_index)), 5); // DW.AT.data_member_location, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset); } @@ -468,7 +468,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.append(0); } @@ -485,7 +485,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset); @@ -521,7 +521,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try 
leb128.writeULEB128(dbg_info_buffer.writer(), payload_off); } @@ -536,7 +536,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, error_ty, @as(u32, @intCast(index))); // DW.AT.data_member_location, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), error_off); } @@ -640,7 +640,7 @@ pub const DeclState = struct { try dbg_info.ensureUnusedCapacity(5 + name_with_null.len); const index = dbg_info.items.len; try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4 - try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4 + try self.addTypeRelocGlobal(atom_index, ty, @as(u32, @intCast(index))); // DW.AT.type, DW.FORM.ref4 dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string } @@ -723,20 +723,20 @@ pub const DeclState = struct { .memory, .linker_load, => { - const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8)); + const ptr_width = @as(u8, @intCast(@divExact(target.ptrBitWidth(), 8))); try dbg_info.ensureUnusedCapacity(2 + ptr_width); dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1 + ptr_width + @intFromBool(is_ptr), DW.OP.addr, // literal address }); - const offset = @intCast(u32, dbg_info.items.len); + const offset = @as(u32, @intCast(dbg_info.items.len)); const addr = switch (loc) { .memory => |x| x, else => 0, }; switch (ptr_width) { 0...4 => { - try dbg_info.writer().writeInt(u32, @intCast(u32, addr), endian); + try dbg_info.writer().writeInt(u32, @as(u32, @intCast(addr)), endian); }, 5...8 => { try dbg_info.writer().writeInt(u64, addr, endian); @@ -765,19 +765,19 @@ pub const DeclState = struct { if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu, }); if (child_ty.isSignedInt(mod)) { - try 
leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)); + try leb128.writeILEB128(dbg_info.writer(), @as(i64, @bitCast(x))); } else { try leb128.writeULEB128(dbg_info.writer(), x); } try dbg_info.append(DW.OP.stack_value); - dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2); + dbg_info.items[fixup] += @as(u8, @intCast(dbg_info.items.len - fixup - 2)); }, .undef => { // DW.AT.location, DW.FORM.exprloc // uleb128(exprloc_len) // DW.OP.implicit_value uleb128(len_of_bytes) bytes - const abi_size = @intCast(u32, child_ty.abiSize(mod)); + const abi_size = @as(u32, @intCast(child_ty.abiSize(mod))); var implicit_value_len = std.ArrayList(u8).init(self.gpa); defer implicit_value_len.deinit(); try leb128.writeULEB128(implicit_value_len.writer(), abi_size); @@ -807,7 +807,7 @@ pub const DeclState = struct { try dbg_info.ensureUnusedCapacity(5 + name_with_null.len); const index = dbg_info.items.len; try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4 - try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, child_ty, @as(u32, @intCast(index))); dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string } @@ -963,7 +963,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) func.lbrace_line, func.rbrace_line, }); - const line = @intCast(u28, decl.src_line + func.lbrace_line); + const line = @as(u28, @intCast(decl.src_line + func.lbrace_line)); const ptr_width_bytes = self.ptrWidthBytes(); dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ @@ -1013,7 +1013,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4 // if (fn_ret_has_bits) { - try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len)); + try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @as(u32, 
@intCast(dbg_info_buffer.items.len))); dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4 } @@ -1055,11 +1055,11 @@ pub fn commitDeclState( .p32 => { { const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian); } { const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian); } }, .p64 => { @@ -1079,7 +1079,7 @@ pub fn commitDeclState( sym_size, }); const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, sym_size), target_endian); + mem.writeInt(u32, ptr, @as(u32, @intCast(sym_size)), target_endian); } try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence }); @@ -1091,7 +1091,7 @@ pub fn commitDeclState( // probably need to edit that logic too. 
const src_fn_index = self.src_fn_decls.get(decl_index).?; const src_fn = self.getAtomPtr(.src_fn, src_fn_index); - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + src_fn.len = @as(u32, @intCast(dbg_line_buffer.items.len)); if (self.src_fn_last_index) |last_index| blk: { if (src_fn_index == last_index) break :blk; @@ -1254,12 +1254,12 @@ pub fn commitDeclState( }; if (deferred) continue; - symbol.offset = @intCast(u32, dbg_info_buffer.items.len); + symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len)); try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); + try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); while (decl_state.abbrev_relocs.popOrNull()) |reloc| { if (reloc.target) |target| { @@ -1402,7 +1402,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) self.di_atom_first_index = atom_index; self.di_atom_last_index = atom_index; - atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes())); + atom.off = @as(u32, @intCast(padToIdeal(self.dbgInfoHeaderBytes()))); } } @@ -1513,7 +1513,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl. func.lbrace_line, func.rbrace_line, }); - const line = @intCast(u28, decl.src_line + func.lbrace_line); + const line = @as(u28, @intCast(decl.src_line + func.lbrace_line)); var data: [4]u8 = undefined; leb128.writeUnsignedFixed(4, &data, line); @@ -1791,10 +1791,10 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u const dbg_info_end = self.getDebugInfoEnd().? 
+ 1; const init_len = dbg_info_end - after_init_len; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len))); } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)), target_endian); }, .p64 => { di_buf.appendNTimesAssumeCapacity(0xff, 4); @@ -1804,11 +1804,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version const abbrev_offset = self.abbrev_table_offset.?; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset))); di_buf.appendAssumeCapacity(8); // address size } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset), target_endian); + mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)), target_endian); di_buf.appendAssumeCapacity(4); // address size }, .p64 => { @@ -1828,9 +1828,9 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc); mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc); - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, name_strp)); - mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, comp_dir_strp)); - 
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, producer_strp)); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(name_strp))); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(comp_dir_strp))); + mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(producer_strp))); } else { self.writeAddrAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset self.writeAddrAssumeCapacity(&di_buf, low_pc); @@ -1885,7 +1885,7 @@ fn resolveCompilationDir(module: *Module, buffer: *[std.fs.MAX_PATH_BYTES]u8) [] fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void { const target_endian = self.target.cpu.arch.endian(); switch (self.ptr_width) { - .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian), + .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian), .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian), } } @@ -2152,10 +2152,10 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { // Go back and populate the initial length. 
const init_len = di_buf.items.len - after_init_len; if (self.bin_file.tag == .macho) { - mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len))); } else switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)), target_endian); }, .p64 => { // initial length - length of the .debug_aranges contribution for this compilation unit, @@ -2165,7 +2165,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { }, } - const needed_size = @intCast(u32, di_buf.items.len); + const needed_size = @as(u32, @intCast(di_buf.items.len)); switch (self.bin_file.tag) { .elf => { const elf_file = self.bin_file.cast(File.Elf).?; @@ -2293,7 +2293,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { di_buf.appendSliceAssumeCapacity(file); di_buf.appendSliceAssumeCapacity(&[_]u8{ 0, // null byte for the relative path name - @intCast(u8, dir_index), // directory_index + @as(u8, @intCast(dir_index)), // directory_index 0, // mtime (TODO supply this) 0, // file size bytes (TODO supply this) }); @@ -2304,11 +2304,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { switch (self.bin_file.tag) { .macho => { - mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len)); + mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len))); }, else => switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian); + mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)), target_endian); }, .p64 => { mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian); @@ -2348,7 +2348,7 @@ pub fn 
writeDbgLineHeader(self: *Dwarf) !void { .macho => { const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?; const sect_index = d_sym.debug_line_section_index.?; - const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta); + const needed_size = @as(u32, @intCast(d_sym.getSection(sect_index).size + delta)); try d_sym.growSection(sect_index, needed_size, true); const file_pos = d_sym.getSection(sect_index).offset + first_fn.off; @@ -2384,11 +2384,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { const init_len = self.getDebugLineProgramEnd().? - before_init_len - init_len_size; switch (self.bin_file.tag) { .macho => { - mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len)); + mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len))); }, else => switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len), target_endian); + mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)), target_endian); }, .p64 => { mem.writeInt(u64, di_buf.items[before_init_len + 4 ..][0..8], init_len, target_endian); @@ -2477,7 +2477,7 @@ fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []cons } size += 1; // file names sentinel - return @intCast(u32, size); + return @as(u32, @intCast(size)); } /// The reloc offset for the line offset of a function from the previous function's line. 
@@ -2516,7 +2516,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); + try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); log.debug("writeDeclDebugInfo in flushModule", .{}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); @@ -2581,7 +2581,7 @@ fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 { else => unreachable, } } - return @intCast(u28, gop.index + 1); + return @as(u28, @intCast(gop.index + 1)); } fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { @@ -2614,7 +2614,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { const dir_index: u28 = blk: { const dirs_gop = dirs.getOrPutAssumeCapacity(dir_path); - break :blk @intCast(u28, dirs_gop.index + 1); + break :blk @as(u28, @intCast(dirs_gop.index + 1)); }; files_dir_indexes.appendAssumeCapacity(dir_index); @@ -2679,12 +2679,12 @@ fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index { const index = blk: { switch (kind) { .src_fn => { - const index = @intCast(Atom.Index, self.src_fns.items.len); + const index = @as(Atom.Index, @intCast(self.src_fns.items.len)); _ = try self.src_fns.addOne(self.allocator); break :blk index; }, .di_atom => { - const index = @intCast(Atom.Index, self.di_atoms.items.len); + const index = @as(Atom.Index, @intCast(self.di_atoms.items.len)); _ = try self.di_atoms.addOne(self.allocator); break :blk index; }, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 283bd9ccca4b..8d08b73d6a8c 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -455,7 +455,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { const ptr_size: u8 = self.ptrWidthBytes(); if (self.phdr_table_index == null) { - self.phdr_table_index = 
@intCast(u16, self.program_headers.items.len); + self.phdr_table_index = @as(u16, @intCast(self.program_headers.items.len)); const p_align: u16 = switch (self.ptr_width) { .p32 => @alignOf(elf.Elf32_Phdr), .p64 => @alignOf(elf.Elf64_Phdr), @@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_table_load_index == null) { - self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len); + self.phdr_table_load_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Same as for GOT const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000; const p_align = self.page_size; @@ -492,7 +492,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_re_index == null) { - self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_re_index = @as(u16, @intCast(self.program_headers.items.len)); const file_size = self.base.options.program_code_size_hint; const p_align = self.page_size; const off = self.findFreeSpace(file_size, p_align); @@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_got_index == null) { - self.phdr_got_index = @intCast(u16, self.program_headers.items.len); + self.phdr_got_index = @as(u16, @intCast(self.program_headers.items.len)); const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint; // We really only need ptr alignment but since we are using PROGBITS, linux requires // page align. @@ -538,7 +538,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_ro_index == null) { - self.phdr_load_ro_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_ro_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Find a hint about how much data need to be in rodata ? 
const file_size = 1024; // Same reason as for GOT @@ -561,7 +561,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.phdr_load_rw_index == null) { - self.phdr_load_rw_index = @intCast(u16, self.program_headers.items.len); + self.phdr_load_rw_index = @as(u16, @intCast(self.program_headers.items.len)); // TODO Find a hint about how much data need to be in data ? const file_size = 1024; // Same reason as for GOT @@ -584,7 +584,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.shstrtab_index == null) { - self.shstrtab_index = @intCast(u16, self.sections.slice().len); + self.shstrtab_index = @as(u16, @intCast(self.sections.slice().len)); assert(self.shstrtab.buffer.items.len == 0); try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0 const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1); @@ -609,7 +609,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.text_section_index == null) { - self.text_section_index = @intCast(u16, self.sections.slice().len); + self.text_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_re_index.?]; try self.sections.append(gpa, .{ @@ -631,7 +631,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.got_section_index == null) { - self.got_section_index = @intCast(u16, self.sections.slice().len); + self.got_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_got_index.?]; try self.sections.append(gpa, .{ @@ -653,7 +653,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.rodata_section_index == null) { - self.rodata_section_index = @intCast(u16, self.sections.slice().len); + self.rodata_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_ro_index.?]; try self.sections.append(gpa, .{ @@ -675,7 +675,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { 
} if (self.data_section_index == null) { - self.data_section_index = @intCast(u16, self.sections.slice().len); + self.data_section_index = @as(u16, @intCast(self.sections.slice().len)); const phdr = &self.program_headers.items[self.phdr_load_rw_index.?]; try self.sections.append(gpa, .{ @@ -697,7 +697,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.symtab_section_index == null) { - self.symtab_section_index = @intCast(u16, self.sections.slice().len); + self.symtab_section_index = @as(u16, @intCast(self.sections.slice().len)); const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym); const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym); const file_size = self.base.options.symbol_count_hint * each_size; @@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { .sh_size = file_size, // The section header index of the associated string table. .sh_link = self.shstrtab_index.?, - .sh_info = @intCast(u32, self.local_symbols.items.len), + .sh_info = @as(u32, @intCast(self.local_symbols.items.len)), .sh_addralign = min_align, .sh_entsize = each_size, }, @@ -726,7 +726,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { if (self.dwarf) |*dw| { if (self.debug_str_section_index == null) { - self.debug_str_section_index = @intCast(u16, self.sections.slice().len); + self.debug_str_section_index = @as(u16, @intCast(self.sections.slice().len)); assert(dw.strtab.buffer.items.len == 0); try dw.strtab.buffer.append(gpa, 0); try self.sections.append(gpa, .{ @@ -749,7 +749,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_info_section_index == null) { - self.debug_info_section_index = @intCast(u16, self.sections.slice().len); + self.debug_info_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 200; const p_align = 1; @@ -778,7 +778,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_abbrev_section_index 
== null) { - self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len); + self.debug_abbrev_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 128; const p_align = 1; @@ -807,7 +807,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_aranges_section_index == null) { - self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len); + self.debug_aranges_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 160; const p_align = 16; @@ -836,7 +836,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { } if (self.debug_line_section_index == null) { - self.debug_line_section_index = @intCast(u16, self.sections.slice().len); + self.debug_line_section_index = @as(u16, @intCast(self.sections.slice().len)); const file_size_hint = 250; const p_align = 1; @@ -1100,7 +1100,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node }); switch (self.ptr_width) { - .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset), + .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset), .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset), } @@ -1170,7 +1170,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node if (needed_size > allocated_size) { phdr_table.p_offset = 0; // free the space - phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); + phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align))); } phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align); @@ -2004,7 +2004,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void { const target_endian = 
self.base.options.target.cpu.arch.endian(); switch (self.ptr_width) { - .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian), + .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian), .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian), } } @@ -2064,15 +2064,15 @@ fn writeElfHeader(self: *Elf) !void { const phdr_table_offset = self.program_headers.items[self.phdr_table_index.?].p_offset; switch (self.ptr_width) { .p32 => { - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(e_entry)), endian); index += 4; // e_phoff - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, phdr_table_offset), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(phdr_table_offset)), endian); index += 4; // e_shoff - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(self.shdr_table_offset.?)), endian); index += 4; }, .p64 => { @@ -2108,7 +2108,7 @@ fn writeElfHeader(self: *Elf) !void { mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian); index += 2; - const e_phnum = @intCast(u16, self.program_headers.items.len); + const e_phnum = @as(u16, @intCast(self.program_headers.items.len)); mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian); index += 2; @@ -2119,7 +2119,7 @@ fn writeElfHeader(self: *Elf) !void { mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian); index += 2; - const e_shnum = @intCast(u16, self.sections.slice().len); + const e_shnum = @as(u16, @intCast(self.sections.slice().len)); mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian); index += 2; @@ -2223,7 +2223,7 @@ fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: pub fn createAtom(self: *Elf) !Atom.Index { const gpa = 
self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const local_sym_index = try self.allocateLocalSymbol(); try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index); @@ -2367,7 +2367,7 @@ pub fn allocateLocalSymbol(self: *Elf) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.local_symbols.items.len}); - const index = @intCast(u32, self.local_symbols.items.len); + const index = @as(u32, @intCast(self.local_symbols.items.len)); _ = self.local_symbols.addOneAssumeCapacity(); break :blk index; } @@ -2557,7 +2557,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s .iov_len = code.len, }}; var remote_vec: [1]std.os.iovec_const = .{.{ - .iov_base = @ptrFromInt([*]u8, @intCast(usize, local_sym.st_value)), + .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(local_sym.st_value)))), .iov_len = code.len, }}; const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0); @@ -2910,7 +2910,7 @@ pub fn updateDeclExports( continue; }, }; - const stt_bits: u8 = @truncate(u4, decl_sym.st_info); + const stt_bits: u8 = @as(u4, @truncate(decl_sym.st_info)); if (decl_metadata.getExport(self, exp_name)) |i| { const sym = &self.global_symbols.items[i]; sym.* = .{ @@ -2926,7 +2926,7 @@ pub fn updateDeclExports( _ = self.global_symbols.addOneAssumeCapacity(); break :blk self.global_symbols.items.len - 1; }; - try decl_metadata.exports.append(gpa, @intCast(u32, i)); + try decl_metadata.exports.append(gpa, @as(u32, @intCast(i))); self.global_symbols.items[i] = .{ .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, @@ -3030,12 +3030,12 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void switch (entry_size) { 2 => { var buf: [2]u8 = undefined; - mem.writeInt(u16, &buf, 
@intCast(u16, got_value), endian); + mem.writeInt(u16, &buf, @as(u16, @intCast(got_value)), endian); try self.base.file.?.pwriteAll(&buf, off); }, 4 => { var buf: [4]u8 = undefined; - mem.writeInt(u32, &buf, @intCast(u32, got_value), endian); + mem.writeInt(u32, &buf, @as(u32, @intCast(got_value)), endian); try self.base.file.?.pwriteAll(&buf, off); }, 8 => { @@ -3051,7 +3051,7 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void .iov_len = buf.len, }}; var remote_vec: [1]std.os.iovec_const = .{.{ - .iov_base = @ptrFromInt([*]u8, @intCast(usize, vaddr)), + .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(vaddr)))), .iov_len = buf.len, }}; const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0); @@ -3086,7 +3086,7 @@ fn writeSymbol(self: *Elf, index: usize) !void { }; const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size; try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true); - syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len); + syms_sect.sh_info = @as(u32, @intCast(self.local_symbols.items.len)); } const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian(); const off = switch (self.ptr_width) { @@ -3101,8 +3101,8 @@ fn writeSymbol(self: *Elf, index: usize) !void { var sym = [1]elf.Elf32_Sym{ .{ .st_name = local.st_name, - .st_value = @intCast(u32, local.st_value), - .st_size = @intCast(u32, local.st_size), + .st_value = @as(u32, @intCast(local.st_value)), + .st_size = @as(u32, @intCast(local.st_size)), .st_info = local.st_info, .st_other = local.st_other, .st_shndx = local.st_shndx, @@ -3148,8 +3148,8 @@ fn writeAllGlobalSymbols(self: *Elf) !void { const global = self.global_symbols.items[i]; sym.* = .{ .st_name = global.st_name, - .st_value = @intCast(u32, global.st_value), - .st_size = @intCast(u32, global.st_size), + .st_value = @as(u32, @intCast(global.st_value)), + .st_size = 
@as(u32, @intCast(global.st_size)), .st_info = global.st_info, .st_other = global.st_other, .st_shndx = global.st_shndx, @@ -3194,19 +3194,19 @@ fn ptrWidthBytes(self: Elf) u8 { /// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes /// in a 32-bit ELF file. fn archPtrWidthBytes(self: Elf) u8 { - return @intCast(u8, self.base.options.target.ptrBitWidth() / 8); + return @as(u8, @intCast(self.base.options.target.ptrBitWidth() / 8)); } fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr { return .{ .p_type = phdr.p_type, .p_flags = phdr.p_flags, - .p_offset = @intCast(u32, phdr.p_offset), - .p_vaddr = @intCast(u32, phdr.p_vaddr), - .p_paddr = @intCast(u32, phdr.p_paddr), - .p_filesz = @intCast(u32, phdr.p_filesz), - .p_memsz = @intCast(u32, phdr.p_memsz), - .p_align = @intCast(u32, phdr.p_align), + .p_offset = @as(u32, @intCast(phdr.p_offset)), + .p_vaddr = @as(u32, @intCast(phdr.p_vaddr)), + .p_paddr = @as(u32, @intCast(phdr.p_paddr)), + .p_filesz = @as(u32, @intCast(phdr.p_filesz)), + .p_memsz = @as(u32, @intCast(phdr.p_memsz)), + .p_align = @as(u32, @intCast(phdr.p_align)), }; } @@ -3214,14 +3214,14 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr { return .{ .sh_name = shdr.sh_name, .sh_type = shdr.sh_type, - .sh_flags = @intCast(u32, shdr.sh_flags), - .sh_addr = @intCast(u32, shdr.sh_addr), - .sh_offset = @intCast(u32, shdr.sh_offset), - .sh_size = @intCast(u32, shdr.sh_size), + .sh_flags = @as(u32, @intCast(shdr.sh_flags)), + .sh_addr = @as(u32, @intCast(shdr.sh_addr)), + .sh_offset = @as(u32, @intCast(shdr.sh_offset)), + .sh_size = @as(u32, @intCast(shdr.sh_size)), .sh_link = shdr.sh_link, .sh_info = shdr.sh_info, - .sh_addralign = @intCast(u32, shdr.sh_addralign), - .sh_entsize = @intCast(u32, shdr.sh_entsize), + .sh_addralign = @as(u32, @intCast(shdr.sh_addralign)), + .sh_entsize = @as(u32, @intCast(shdr.sh_entsize)), }; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index c91d18b0f731..80195a454db5 100644 --- 
a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -741,7 +741,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No }; const sym = self.getSymbol(global); try lc_writer.writeStruct(macho.entry_point_command{ - .entryoff = @intCast(u32, sym.n_value - seg.vmaddr), + .entryoff = @as(u32, @intCast(sym.n_value - seg.vmaddr)), .stacksize = self.base.options.stack_size_override orelse 0, }); }, @@ -757,7 +757,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No }); try load_commands.writeBuildVersionLC(&self.base.options, lc_writer); - const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len); + const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len)); try lc_writer.writeStruct(self.uuid_cmd); try load_commands.writeLoadDylibLCs(self.dylibs.items, self.referenced_dylibs.keys(), lc_writer); @@ -768,7 +768,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len)); + try self.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len))); try self.writeUuid(comp, uuid_cmd_offset, requires_codesig); if (codesig) |*csig| { @@ -992,7 +992,7 @@ pub fn parseDylib( const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null); defer gpa.free(contents); - const dylib_id = @intCast(u16, self.dylibs.items.len); + const dylib_id = @as(u16, @intCast(self.dylibs.items.len)); var dylib = Dylib{ .weak = opts.weak }; dylib.parseFromBinary( @@ -1412,7 +1412,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void { pub fn createAtom(self: *MachO) !Atom.Index { const gpa = self.base.allocator; - const atom_index = @intCast(Atom.Index, self.atoms.items.len); + const atom_index = @as(Atom.Index, 
@intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); const sym_index = try self.allocateSymbol(); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index); @@ -1588,14 +1588,14 @@ fn resolveSymbolsInDylibs(self: *MachO, actions: *std.ArrayList(ResolveAction)) for (self.dylibs.items, 0..) |dylib, id| { if (!dylib.symbols.contains(sym_name)) continue; - const dylib_id = @intCast(u16, id); + const dylib_id = @as(u16, @intCast(id)); if (!self.referenced_dylibs.contains(dylib_id)) { try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; sym.n_type |= macho.N_EXT; - sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER; + sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER; if (dylib.weak) { sym.n_desc |= macho.N_WEAK_REF; @@ -1789,7 +1789,7 @@ fn allocateSymbol(self: *MachO) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); break :blk index; } @@ -1815,7 +1815,7 @@ fn allocateGlobal(self: *MachO) !u32 { break :blk index; } else { log.debug(" (allocating symbol index {d})", .{self.globals.items.len}); - const index = @intCast(u32, self.globals.items.len); + const index = @as(u32, @intCast(self.globals.items.len)); _ = self.globals.addOneAssumeCapacity(); break :blk index; } @@ -2563,12 +2563,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil try Atom.addRelocation(self, atom_index, .{ .type = .unsigned, .target = .{ .sym_index = sym_index, .file = null }, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .addend = reloc_info.addend, .pcrel = false, .length = 3, }); - try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset)); 
+ try Atom.addRebase(self, atom_index, @as(u32, @intCast(reloc_info.offset))); return 0; } @@ -2582,7 +2582,7 @@ fn populateMissingMetadata(self: *MachO) !void { if (self.pagezero_segment_cmd_index == null) { if (pagezero_vmsize > 0) { - self.pagezero_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.pagezero_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(gpa, .{ .segname = makeStaticString("__PAGEZERO"), .vmsize = pagezero_vmsize, @@ -2593,7 +2593,7 @@ fn populateMissingMetadata(self: *MachO) !void { if (self.header_segment_cmd_index == null) { // The first __TEXT segment is immovable and covers MachO header and load commands. - self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.header_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); @@ -2719,7 +2719,7 @@ fn populateMissingMetadata(self: *MachO) !void { } if (self.linkedit_segment_cmd_index == null) { - self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(gpa, .{ .segname = makeStaticString("__LINKEDIT"), @@ -2752,8 +2752,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts const gpa = self.base.allocator; // In incremental context, we create one section per segment pairing. This way, // we can move the segment in raw file as we please. 
- const segment_id = @intCast(u8, self.segments.items.len); - const section_id = @intCast(u8, self.sections.slice().len); + const segment_id = @as(u8, @intCast(self.segments.items.len)); + const section_id = @as(u8, @intCast(self.sections.slice().len)); const vmaddr = blk: { const prev_segment = self.segments.items[segment_id - 1]; break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); @@ -2788,7 +2788,7 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts .sectname = makeStaticString(sectname), .segname = makeStaticString(segname), .addr = mem.alignForward(u64, vmaddr, opts.alignment), - .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment), + .offset = mem.alignForward(u32, @as(u32, @intCast(off)), opts.alignment), .size = opts.size, .@"align" = math.log2(opts.alignment), .flags = opts.flags, @@ -2832,7 +2832,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { current_size, ); if (amt != current_size) return error.InputOutput; - header.offset = @intCast(u32, new_offset); + header.offset = @as(u32, @intCast(new_offset)); segment.fileoff = new_offset; } @@ -2862,7 +2862,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { // TODO: enforce order by increasing VM addresses in self.sections container. for (self.sections.items(.header)[sect_id + 1 ..], 0..) 
|*next_header, next_sect_id| { - const index = @intCast(u8, sect_id + 1 + next_sect_id); + const index = @as(u8, @intCast(sect_id + 1 + next_sect_id)); const next_segment = self.getSegmentPtr(index); next_header.addr += diff; next_segment.vmaddr += diff; @@ -2972,7 +2972,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm self.segment_table_dirty = true; } - const align_pow = @intCast(u32, math.log2(alignment)); + const align_pow = @as(u32, @intCast(math.log2(alignment))); if (header.@"align" < align_pow) { header.@"align" = align_pow; } @@ -3015,7 +3015,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u fn writeSegmentHeaders(self: *MachO, writer: anytype) !void { for (self.segments.items, 0..) |seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); try writer.writeStruct(seg); for (self.sections.items(.header)[indexes.start..indexes.end]) |header| { try writer.writeStruct(header); @@ -3029,7 +3029,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void { seg.vmsize = 0; for (self.segments.items, 0..) |segment, id| { - if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue; + if (self.linkedit_segment_cmd_index.? 
== @as(u8, @intCast(id))) continue; if (seg.vmaddr < segment.vmaddr + segment.vmsize) { seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size); } @@ -3115,7 +3115,7 @@ fn collectBindDataFromTableSection(self: *MachO, sect_id: u8, bind: anytype, tab log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ base_offset + offset, self.getSymbolName(entry), - @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER), + @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER), }); if (bind_sym.weakRef()) { log.debug(" | marking as weak ref ", .{}); @@ -3150,7 +3150,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void { const bind_sym = self.getSymbol(binding.target); const bind_sym_name = self.getSymbolName(binding.target); const dylib_ordinal = @divTrunc( - @bitCast(i16, bind_sym.n_desc), + @as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER, ); log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ @@ -3285,14 +3285,14 @@ fn writeDyldInfoData(self: *MachO) !void { try self.base.file.?.pwriteAll(buffer, rebase_off); try self.populateLazyBindOffsetsInStubHelper(lazy_bind); - self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off); - self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned); - self.dyld_info_cmd.bind_off = @intCast(u32, bind_off); - self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned); - self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off); - self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned); - self.dyld_info_cmd.export_off = @intCast(u32, export_off); - self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned); + self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off)); + self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned)); + self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off)); + self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned)); + 
self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off)); + self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned)); + self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off)); + self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned)); } fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void { @@ -3337,7 +3337,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx { for (self.locals.items, 0..) |sym, sym_id| { if (sym.n_strx == 0) continue; // no name, skip - const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; + const sym_loc = SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }; if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip try locals.append(sym); @@ -3363,16 +3363,16 @@ fn writeSymtab(self: *MachO) !SymtabCtx { const sym = self.getSymbol(global); if (sym.n_strx == 0) continue; // no name, skip if (!sym.undf()) continue; // not an import, skip - const new_index = @intCast(u32, imports.items.len); + const new_index = @as(u32, @intCast(imports.items.len)); var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global)); try imports.append(out_sym); try imports_table.putNoClobber(global, new_index); } - const nlocals = @intCast(u32, locals.items.len); - const nexports = @intCast(u32, exports.items.len); - const nimports = @intCast(u32, imports.items.len); + const nlocals = @as(u32, @intCast(locals.items.len)); + const nexports = @as(u32, @intCast(exports.items.len)); + const nimports = @as(u32, @intCast(imports.items.len)); const nsyms = nlocals + nexports + nimports; const seg = self.getLinkeditSegmentPtr(); @@ -3392,7 +3392,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx { log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); try 
self.base.file.?.pwriteAll(buffer.items, offset); - self.symtab_cmd.symoff = @intCast(u32, offset); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); self.symtab_cmd.nsyms = nsyms; return SymtabCtx{ @@ -3421,8 +3421,8 @@ fn writeStrtab(self: *MachO) !void { try self.base.file.?.pwriteAll(buffer, offset); - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned)); } const SymtabCtx = struct { @@ -3434,8 +3434,8 @@ const SymtabCtx = struct { fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { const gpa = self.base.allocator; - const nstubs = @intCast(u32, self.stub_table.lookup.count()); - const ngot_entries = @intCast(u32, self.got_table.lookup.count()); + const nstubs = @as(u32, @intCast(self.stub_table.lookup.count())); + const ngot_entries = @as(u32, @intCast(self.got_table.lookup.count())); const nindirectsyms = nstubs * 2 + ngot_entries; const iextdefsym = ctx.nlocalsym; const iundefsym = iextdefsym + ctx.nextdefsym; @@ -3503,7 +3503,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { self.dysymtab_cmd.nextdefsym = ctx.nextdefsym; self.dysymtab_cmd.iundefsym = iundefsym; self.dysymtab_cmd.nundefsym = ctx.nundefsym; - self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset); + self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset)); self.dysymtab_cmd.nindirectsyms = nindirectsyms; } @@ -3530,8 +3530,8 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { // except for code signature data. 
try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1); - self.codesig_cmd.dataoff = @intCast(u32, offset); - self.codesig_cmd.datasize = @intCast(u32, needed_size); + self.codesig_cmd.dataoff = @as(u32, @intCast(offset)); + self.codesig_cmd.datasize = @as(u32, @intCast(needed_size)); } fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void { @@ -3711,7 +3711,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 { fn getSegmentByName(self: MachO, segname: []const u8) ?u8 { for (self.segments.items, 0..) |seg, i| { - if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i); + if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i)); } else return null; } @@ -3734,15 +3734,15 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) // TODO investigate caching with a hashmap for (self.sections.items(.header), 0..) |header, i| { if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname)) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) |seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index d222394ad5a1..5276bf041e06 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -169,7 +169,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) ! 
}; const object_offset = try symtab_reader.readIntLittle(u32); - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0); const owned_name = try allocator.dupe(u8, sym_name); const res = try self.toc.getOrPut(allocator, owned_name); defer if (res.found_existing) allocator.free(owned_name); diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index fcb4c1606353..f527ca358107 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -72,7 +72,7 @@ const CodeDirectory = struct { .hashSize = hash_size, .hashType = macho.CS_HASHTYPE_SHA256, .platform = 0, - .pageSize = @truncate(u8, std.math.log2(page_size)), + .pageSize = @as(u8, @truncate(std.math.log2(page_size))), .spare2 = 0, .scatterOffset = 0, .teamOffset = 0, @@ -110,7 +110,7 @@ const CodeDirectory = struct { fn size(self: CodeDirectory) u32 { const code_slots = self.inner.nCodeSlots * hash_size; const special_slots = self.inner.nSpecialSlots * hash_size; - return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots); + return @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.ident.len + 1 + special_slots + code_slots)); } fn write(self: CodeDirectory, writer: anytype) !void { @@ -139,9 +139,9 @@ const CodeDirectory = struct { try writer.writeAll(self.ident); try writer.writeByte(0); - var i: isize = @intCast(isize, self.inner.nSpecialSlots); + var i: isize = @as(isize, @intCast(self.inner.nSpecialSlots)); while (i > 0) : (i -= 1) { - try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]); + try writer.writeAll(&self.special_slots[@as(usize, @intCast(i - 1))]); } for (self.code_slots.items) |slot| { @@ -186,7 +186,7 @@ const Entitlements = struct { } fn size(self: Entitlements) u32 { - return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32); + return @as(u32, @intCast(self.inner.len)) + 2 * 
@sizeOf(u32); } fn write(self: Entitlements, writer: anytype) !void { @@ -281,7 +281,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); + const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size)); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -331,7 +331,7 @@ pub fn writeAdhocSignature( } self.code_directory.inner.hashOffset = - @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size); + @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size)); self.code_directory.inner.length = self.code_directory.size(); header.length += self.code_directory.size(); @@ -339,7 +339,7 @@ pub fn writeAdhocSignature( try writer.writeIntBig(u32, header.length); try writer.writeIntBig(u32, header.count); - var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len); + var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @as(u32, @intCast(blobs.items.len)); for (blobs.items) |blob| { try writer.writeIntBig(u32, blob.slotType()); try writer.writeIntBig(u32, offset); @@ -383,7 +383,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); + return @as(u32, @intCast(mem.alignForward(u64, ssize, @sizeOf(u64)))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git 
a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index fdb8c9c816a0..ade26de920eb 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -64,9 +64,9 @@ pub const Reloc = struct { /// has been called to get a viable debug symbols output. pub fn populateMissingMetadata(self: *DebugSymbols) !void { if (self.dwarf_segment_cmd_index == null) { - self.dwarf_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); - const off = @intCast(u64, self.page_size); + const off = @as(u64, @intCast(self.page_size)); const ideal_size: u16 = 200 + 128 + 160 + 250; const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); @@ -86,7 +86,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { try self.dwarf.strtab.buffer.append(self.allocator, 0); self.debug_str_section_index = try self.allocateSection( "__debug_str", - @intCast(u32, self.dwarf.strtab.buffer.items.len), + @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)), 0, ); self.debug_string_table_dirty = true; @@ -113,7 +113,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { } if (self.linkedit_segment_cmd_index == null) { - self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len); + self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); try self.segments.append(self.allocator, .{ .segname = makeStaticString("__LINKEDIT"), .maxprot = macho.PROT.READ, @@ -128,7 +128,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme var sect = macho.section_64{ .sectname = makeStaticString(sectname), .segname = segment.segname, - .size = @intCast(u32, size), + .size = @as(u32, @intCast(size)), .@"align" = alignment, }; const alignment_pow_2 = try math.powi(u32, 2, alignment); @@ -141,9 +141,9 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme off + size, }); - 
sect.offset = @intCast(u32, off); + sect.offset = @as(u32, @intCast(off)); - const index = @intCast(u8, self.sections.items.len); + const index = @as(u8, @intCast(self.sections.items.len)); try self.sections.append(self.allocator, sect); segment.cmdsize += @sizeOf(macho.section_64); segment.nsects += 1; @@ -176,7 +176,7 @@ pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requir if (amt != existing_size) return error.InputOutput; } - sect.offset = @intCast(u32, new_offset); + sect.offset = @as(u32, @intCast(new_offset)); } sect.size = needed_size; @@ -286,7 +286,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void { { const sect_index = self.debug_str_section_index.?; if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) { - const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len); + const needed_size = @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)); try self.growSection(sect_index, needed_size, false); try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset); self.debug_string_table_dirty = false; @@ -307,7 +307,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void { const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len)); + try self.writeHeader(macho_file, ncmds, @as(u32, @intCast(lc_buffer.items.len))); assert(!self.debug_abbrev_section_dirty); assert(!self.debug_aranges_section_dirty); @@ -378,7 +378,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) // Write segment/section headers from the binary file first. const end = macho_file.linkedit_segment_cmd_index.?; for (macho_file.segments.items[0..end], 0..) 
|seg, i| { - const indexes = macho_file.getSectionIndexes(@intCast(u8, i)); + const indexes = macho_file.getSectionIndexes(@as(u8, @intCast(i))); var out_seg = seg; out_seg.fileoff = 0; out_seg.filesize = 0; @@ -407,7 +407,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) } // Next, commit DSYM's __LINKEDIT and __DWARF segments headers. for (self.segments.items, 0..) |seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); try writer.writeStruct(seg); for (self.sections.items[indexes.start..indexes.end]) |header| { try writer.writeStruct(header); @@ -473,7 +473,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { for (macho_file.locals.items, 0..) |sym, sym_id| { if (sym.n_strx == 0) continue; // no name, skip - const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; + const sym_loc = MachO.SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }; if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip if (macho_file.getGlobal(macho_file.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip var out_sym = sym; @@ -501,10 +501,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; - self.symtab_cmd.symoff = @intCast(u32, offset); - self.symtab_cmd.nsyms = @intCast(u32, nsyms); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); + self.symtab_cmd.nsyms = @as(u32, @intCast(nsyms)); - const locals_off = @intCast(u32, offset); + const locals_off = @as(u32, @intCast(offset)); const locals_size = nlocals * @sizeOf(macho.nlist_64); const exports_off = locals_off + locals_size; const exports_size = nexports * @sizeOf(macho.nlist_64); @@ -521,13 +521,13 @@ fn writeStrtab(self: *DebugSymbols) !void { defer tracy.end(); const 
seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); + const symtab_size = @as(u32, @intCast(self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64))); const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size)); log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); @@ -542,8 +542,8 @@ fn writeStrtab(self: *DebugSymbols) !void { pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) 
|seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } diff --git a/src/link/MachO/DwarfInfo.zig b/src/link/MachO/DwarfInfo.zig index 3218435734f7..07d98e8e949a 100644 --- a/src/link/MachO/DwarfInfo.zig +++ b/src/link/MachO/DwarfInfo.zig @@ -70,7 +70,7 @@ pub fn genSubprogramLookupByName( low_pc = addr; } if (try attr.getConstant(self)) |constant| { - low_pc = @intCast(u64, constant); + low_pc = @as(u64, @intCast(constant)); } }, dwarf.AT.high_pc => { @@ -78,7 +78,7 @@ pub fn genSubprogramLookupByName( high_pc = addr; } if (try attr.getConstant(self)) |constant| { - high_pc = @intCast(u64, constant); + high_pc = @as(u64, @intCast(constant)); } }, else => {}, @@ -261,7 +261,7 @@ pub const Attribute = struct { switch (self.form) { dwarf.FORM.string => { - return mem.sliceTo(@ptrCast([*:0]const u8, debug_info.ptr), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_info.ptr)), 0); }, dwarf.FORM.strp => { const off = if (cuh.is_64bit) @@ -499,5 +499,5 @@ fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: us fn getString(self: DwarfInfo, off: u64) []const u8 { assert(off < self.debug_str.len); - return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.debug_str.ptr + @as(usize, @intCast(off)))), 0); } diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig index 971706dae628..ee8f34f7560d 100644 --- a/src/link/MachO/Dylib.zig +++ b/src/link/MachO/Dylib.zig @@ -75,7 +75,7 @@ pub const Id = struct { .int => |int| { var out: u32 = 0; const major = math.cast(u16, int) orelse return error.Overflow; - out += @intCast(u32, major) << 16; + out += @as(u32, @intCast(major)) << 16; return out; }, .float => |float| { @@ -106,9 +106,9 @@ 
pub const Id = struct { out += try fmt.parseInt(u8, values[2], 10); } if (count > 1) { - out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8; + out += @as(u32, @intCast(try fmt.parseInt(u8, values[1], 10))) << 8; } - out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16; + out += @as(u32, @intCast(try fmt.parseInt(u16, values[0], 10))) << 16; return out; } @@ -164,11 +164,11 @@ pub fn parseFromBinary( switch (cmd.cmd()) { .SYMTAB => { const symtab_cmd = cmd.cast(macho.symtab_command).?; - const symtab = @ptrCast( + const symtab = @as( [*]const macho.nlist_64, // Alignment is guaranteed as a dylib is a final linked image and has to have sections // properly aligned in order to be correctly loaded by the loader. - @alignCast(@alignOf(macho.nlist_64), &data[symtab_cmd.symoff]), + @ptrCast(@alignCast(&data[symtab_cmd.symoff])), )[0..symtab_cmd.nsyms]; const strtab = data[symtab_cmd.stroff..][0..symtab_cmd.strsize]; @@ -176,7 +176,7 @@ pub fn parseFromBinary( const add_to_symtab = sym.ext() and (sym.sect() or sym.indr()); if (!add_to_symtab) continue; - const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0); + const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0); try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), false); } }, diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 105a806075b2..29fe2988b610 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -164,7 +164,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) else => {}, } else return; - self.in_symtab = @ptrCast([*]align(1) const macho.nlist_64, self.contents.ptr + symtab.symoff)[0..symtab.nsyms]; + self.in_symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(self.contents.ptr + symtab.symoff))[0..symtab.nsyms]; self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize]; self.symtab = try allocator.alloc(macho.nlist_64, 
self.in_symtab.?.len + nsects); @@ -202,7 +202,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) defer sorted_all_syms.deinit(); for (self.in_symtab.?, 0..) |_, index| { - sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) }); + sorted_all_syms.appendAssumeCapacity(.{ .index = @as(u32, @intCast(index)) }); } // We sort by type: defined < undefined, and @@ -225,18 +225,18 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) } } if (sym.sect() and section_index_lookup == null) { - section_index_lookup = .{ .start = @intCast(u32, i), .len = 1 }; + section_index_lookup = .{ .start = @as(u32, @intCast(i)), .len = 1 }; } prev_sect_id = sym.n_sect; self.symtab[i] = sym; self.source_symtab_lookup[i] = sym_id.index; - self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i); - self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value); + self.reverse_symtab_lookup[sym_id.index] = @as(u32, @intCast(i)); + self.source_address_lookup[i] = if (sym.undf()) -1 else @as(i64, @intCast(sym.n_value)); - const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1; - self.strtab_lookup[i] = @intCast(u32, sym_name_len); + const sym_name_len = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.in_strtab.?.ptr + sym.n_strx)), 0).len + 1; + self.strtab_lookup[i] = @as(u32, @intCast(sym_name_len)); } // If there were no undefined symbols, make sure we populate the @@ -267,7 +267,7 @@ const SymbolAtIndex = struct { fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 { const off = self.getSymbol(ctx).n_strx; - return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.in_strtab.?.ptr + off)), 0); } fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 { @@ -338,7 +338,7 @@ fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct { .n_sect = 
n_sect, }); - return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) }; + return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) }; } fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: u64) struct { @@ -360,7 +360,7 @@ fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: .addr = end_addr, }); - return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) }; + return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) }; } const SortedSection = struct { @@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { }; if (sect.size == 0) continue; - const sect_id = @intCast(u8, id); + const sect_id = @as(u8, @intCast(id)); const sym = self.getSectionAliasSymbolPtr(sect_id); sym.* = .{ .n_strx = 0, @@ -417,7 +417,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { const out_sect_id = (try zld.getOutputSection(sect)) orelse continue; if (sect.size == 0) continue; - const sect_id = @intCast(u8, id); + const sect_id = @as(u8, @intCast(id)); const sym_index = self.getSectionAliasSymbolIndex(sect_id); const atom_index = try self.createAtomFromSubsection( zld, @@ -459,7 +459,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { defer gpa.free(sorted_sections); for (sections, 0..) 
|sect, id| { - sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) }; + sorted_sections[id] = .{ .header = sect, .id = @as(u8, @intCast(id)) }; } mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress); @@ -651,7 +651,7 @@ fn filterRelocs( const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr }); const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr }); - return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) }; + return .{ .start = @as(u32, @intCast(start)), .len = @as(u32, @intCast(len)) }; } /// Parse all relocs for the input section, and sort in descending order. @@ -659,7 +659,7 @@ fn filterRelocs( /// section in a sorted manner which is simply not true. fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void { const section = self.getSourceSection(sect_id); - const start = @intCast(u32, self.relocations.items.len); + const start = @as(u32, @intCast(self.relocations.items.len)); if (self.getSourceRelocs(section)) |relocs| { try self.relocations.ensureUnusedCapacity(gpa, relocs.len); self.relocations.appendUnalignedSliceAssumeCapacity(relocs); @@ -677,8 +677,8 @@ fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void { // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. 
- const nbase = @intCast(u32, self.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(self.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = self.getSourceSection(source_sect_id); @@ -745,7 +745,7 @@ fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void { .object_id = object_id, .rel = rel, .code = it.data[offset..], - .base_offset = @intCast(i32, offset), + .base_offset = @as(i32, @intCast(offset)), }); break :blk target; }, @@ -798,7 +798,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void { _ = try zld.initSection("__TEXT", "__unwind_info", .{}); } - try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len)); + try self.unwind_records_lookup.ensureTotalCapacity(gpa, @as(u32, @intCast(self.exec_atoms.items.len))); const unwind_records = self.getUnwindRecords(); @@ -834,14 +834,14 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, offset), + .base_offset = @as(i32, @intCast(offset)), }); log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) }); if (target.getFile() != object_id) { self.unwind_relocs_lookup[record_id].dead = true; } else { const atom_index = self.getAtomIndexForSymbol(target.sym_index).?; - self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id)); + self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @as(u32, @intCast(record_id))); } } } @@ -869,7 +869,7 @@ pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: const sections = self.getSourceSections(); for (sections, 0..) 
|sect, i| { if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName())) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } @@ -898,7 +898,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void { } } else return; const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry)); - const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice]; + const dice = @as([*]align(1) const macho.data_in_code_entry, @ptrCast(self.contents.ptr + cmd.dataoff))[0..ndice]; try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len); self.data_in_code.appendUnalignedSliceAssumeCapacity(dice); mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan); @@ -945,12 +945,12 @@ pub fn parseDwarfInfo(self: Object) DwarfInfo { } pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 { - const size = @intCast(usize, sect.size); + const size = @as(usize, @intCast(sect.size)); return self.contents[sect.offset..][0..size]; } pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 { - const start = @intCast(u32, self.in_symtab.?.len); + const start = @as(u32, @intCast(self.in_symtab.?.len)); return start + sect_id; } @@ -964,7 +964,7 @@ pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 { fn getSourceRelocs(self: Object, sect: macho.section_64) ?[]align(1) const macho.relocation_info { if (sect.nreloc == 0) return null; - return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc]; + return @as([*]align(1) const macho.relocation_info, @ptrCast(self.contents.ptr + sect.reloff))[0..sect.nreloc]; } pub fn getRelocs(self: Object, sect_id: u8) []const macho.relocation_info { @@ -1005,25 +1005,25 @@ pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 { const target_sym_index = @import("zld.zig").lsearch( i64, 
self.source_address_lookup[lookup.start..][0..lookup.len], - Predicate{ .addr = @intCast(i64, addr) }, + Predicate{ .addr = @as(i64, @intCast(addr)) }, ); if (target_sym_index > 0) { - return @intCast(u32, lookup.start + target_sym_index - 1); + return @as(u32, @intCast(lookup.start + target_sym_index - 1)); } } return self.getSectionAliasSymbolIndex(sect_id); } const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{ - .addr = @intCast(i64, addr), + .addr = @as(i64, @intCast(addr)), }); assert(target_sym_index > 0); - return @intCast(u32, target_sym_index - 1); + return @as(u32, @intCast(target_sym_index - 1)); } pub fn getGlobal(self: Object, sym_index: u32) ?u32 { if (self.globals_lookup[sym_index] == -1) return null; - return @intCast(u32, self.globals_lookup[sym_index]); + return @as(u32, @intCast(self.globals_lookup[sym_index])); } pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex { @@ -1041,7 +1041,7 @@ pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entr const sect = self.getSourceSection(sect_id); const data = self.getSectionContents(sect); const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry)); - return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries]; + return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data))[0..num_entries]; } pub fn hasEhFrameRecords(self: Object) bool { diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig index 2685cc26e2e7..b7bbf59cfcf2 100644 --- a/src/link/MachO/Relocation.zig +++ b/src/link/MachO/Relocation.zig @@ -94,9 +94,9 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod .tlv_initializer => blk: { assert(self.addend == 0); // Addend here makes no sense. 
const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?]; - break :blk @intCast(i64, target_base_addr - header.addr); + break :blk @as(i64, @intCast(target_base_addr - header.addr)); }, - else => @intCast(i64, target_base_addr) + self.addend, + else => @as(i64, @intCast(target_base_addr)) + self.addend, }; log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{ @@ -119,7 +119,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] .branch => { const displacement = math.cast( i28, - @intCast(i64, target_addr) - @intCast(i64, source_addr), + @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)), ) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement var inst = aarch64.Instruction{ .unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload( @@ -127,25 +127,25 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] aarch64.Instruction.unconditional_branch_immediate, ), buffer[0..4]), }; - inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2)); + inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2)))); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .page, .got_page => { - const source_page = @intCast(i32, source_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - source_page)); + const source_page = @as(i32, @intCast(source_addr >> 12)); + const target_page = @as(i32, @intCast(target_addr >> 12)); + const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page)))); var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( aarch64.Instruction, aarch64.Instruction.pc_relative_address, ), buffer[0..4]), }; - inst.pc_relative_address.immhi = @truncate(u19, pages >> 2); - inst.pc_relative_address.immlo = @truncate(u2, 
pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, buffer[0..4], inst.toU32()); }, .pageoff, .got_pageoff => { - const narrowed = @truncate(u12, @intCast(u64, target_addr)); + const narrowed = @as(u12, @truncate(@as(u64, @intCast(target_addr)))); if (isArithmeticOp(buffer[0..4])) { var inst = aarch64.Instruction{ .add_subtract_immediate = mem.bytesToValue(meta.TagPayload( @@ -180,8 +180,8 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] } }, .tlv_initializer, .unsigned => switch (self.length) { - 2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr))), - 3 => mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr)), + 2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))), + 3 => mem.writeIntLittle(u64, buffer[0..8], @as(u64, @bitCast(target_addr))), else => unreachable, }, .got, .signed, .tlv => unreachable, // Invalid target architecture. 
@@ -191,16 +191,16 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: [] fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8) void { switch (self.type) { .branch, .got, .tlv, .signed => { - const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4); - mem.writeIntLittle(u32, code[self.offset..][0..4], @bitCast(u32, displacement)); + const displacement = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4)); + mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @bitCast(displacement))); }, .tlv_initializer, .unsigned => { switch (self.length) { 2 => { - mem.writeIntLittle(u32, code[self.offset..][0..4], @truncate(u32, @bitCast(u64, target_addr))); + mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))); }, 3 => { - mem.writeIntLittle(u64, code[self.offset..][0..8], @bitCast(u64, target_addr)); + mem.writeIntLittle(u64, code[self.offset..][0..8], @as(u64, @bitCast(target_addr))); }, else => unreachable, } @@ -210,24 +210,24 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8 } pub inline fn isArithmeticOp(inst: *const [4]u8) bool { - const group_decode = @truncate(u5, inst[3]); + const group_decode = @as(u5, @truncate(inst[3])); return ((group_decode >> 2) == 4); } pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 { - const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction); + const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 4 + correction)); return math.cast(i32, disp) orelse error.Overflow; } pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 { - const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr); + const disp = @as(i64, @intCast(target_addr)) - @as(i64, 
@intCast(source_addr)); return math.cast(i28, disp) orelse error.Overflow; } pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 { - const source_page = @intCast(i32, source_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @intCast(i21, target_page - source_page); + const source_page = @as(i32, @intCast(source_addr >> 12)); + const target_page = @as(i32, @intCast(target_addr >> 12)); + const pages = @as(i21, @intCast(target_page - source_page)); return pages; } @@ -241,7 +241,7 @@ pub const PageOffsetInstKind = enum { }; pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 { - const narrowed = @truncate(u12, target_addr); + const narrowed = @as(u12, @truncate(target_addr)); return switch (kind) { .arithmetic, .load_store_8 => narrowed, .load_store_16 => try math.divExact(u12, narrowed, 2), diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig index 34200db7dc72..cabe611b6447 100644 --- a/src/link/MachO/Trie.zig +++ b/src/link/MachO/Trie.zig @@ -220,7 +220,7 @@ pub const Node = struct { try writer.writeByte(0); } // Write number of edges (max legal number of edges is 256). - try writer.writeByte(@intCast(u8, self.edges.items.len)); + try writer.writeByte(@as(u8, @intCast(self.edges.items.len))); for (self.edges.items) |edge| { // Write edge label and offset to next node in trie. 
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig index 3c9a438f92fa..cfef053d1b6c 100644 --- a/src/link/MachO/UnwindInfo.zig +++ b/src/link/MachO/UnwindInfo.zig @@ -87,7 +87,7 @@ const Page = struct { const record_id = page.page_encodings[index]; const record = info.records.items[record_id]; if (record.compactUnwindEncoding == enc) { - return @intCast(u8, index); + return @as(u8, @intCast(index)); } } return null; @@ -150,14 +150,14 @@ const Page = struct { for (info.records.items[page.start..][0..page.count]) |record| { try writer.writeStruct(macho.unwind_info_regular_second_level_entry{ - .functionOffset = @intCast(u32, record.rangeStart), + .functionOffset = @as(u32, @intCast(record.rangeStart)), .encoding = record.compactUnwindEncoding, }); } }, .compressed => { const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) + - @intCast(u16, page.page_encodings_count) * @sizeOf(u32); + @as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32); try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{ .entryPageOffset = entry_offset, .entryCount = page.count, @@ -183,8 +183,8 @@ const Page = struct { break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?; }; const compressed = macho.UnwindInfoCompressedEntry{ - .funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart), - .encodingIndex = @intCast(u8, enc_index), + .funcOffset = @as(u24, @intCast(record.rangeStart - first_entry.rangeStart)), + .encodingIndex = @as(u8, @intCast(enc_index)), }; try writer.writeStruct(compressed); } @@ -214,15 +214,15 @@ pub fn scanRelocs(zld: *Zld) !void { if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) { if (getPersonalityFunctionReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), record_id, )) |rel| { // Personality function; add GOT pointer. 
const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); try Atom.addGotEntry(zld, target); } @@ -258,18 +258,18 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { var record = unwind_records[record_id]; if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) { - try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record); + try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record); } else { if (getPersonalityFunctionReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), record_id, )) |rel| { const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const personality_index = info.getPersonalityFunction(target) orelse inner: { const personality_index = info.personalities_count; @@ -282,14 +282,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1); } - if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| { + if (getLsdaReloc(zld, @as(u32, @intCast(object_id)), record_id)) |rel| { const target = Atom.parseRelocTarget(zld, .{ - .object_id = @intCast(u32, object_id), + .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * 
@sizeOf(macho.compact_unwind_entry))), }); - record.lsda = @bitCast(u64, target); + record.lsda = @as(u64, @bitCast(target)); } } break :blk record; @@ -302,7 +302,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| { if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue; var record = nullRecord(); - try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record); + try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record); switch (cpu_arch) { .aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF), .x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF), @@ -320,7 +320,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { const sym = zld.getSymbol(sym_loc); assert(sym.n_desc != N_DEAD); record.rangeStart = sym.n_value; - record.rangeLength = @intCast(u32, atom.size); + record.rangeLength = @as(u32, @intCast(atom.size)); records.appendAssumeCapacity(record); atom_indexes.appendAssumeCapacity(atom_index); @@ -329,7 +329,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { // Fold records try info.records.ensureTotalCapacity(info.gpa, records.items.len); - try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len)); + try info.records_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(atom_indexes.items.len))); var maybe_prev: ?macho.compact_unwind_entry = null; for (records.items, 0..) 
|record, i| { @@ -341,15 +341,15 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { (prev.personalityFunction != record.personalityFunction) or record.lsda > 0) { - const record_id = @intCast(RecordIndex, info.records.items.len); + const record_id = @as(RecordIndex, @intCast(info.records.items.len)); info.records.appendAssumeCapacity(record); maybe_prev = record; break :blk record_id; } else { - break :blk @intCast(RecordIndex, info.records.items.len - 1); + break :blk @as(RecordIndex, @intCast(info.records.items.len - 1)); } } else { - const record_id = @intCast(RecordIndex, info.records.items.len); + const record_id = @as(RecordIndex, @intCast(info.records.items.len)); info.records.appendAssumeCapacity(record); maybe_prev = record; break :blk record_id; @@ -459,14 +459,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { } } - page.count = @intCast(u16, i - page.start); + page.count = @as(u16, @intCast(i - page.start)); if (i < info.records.items.len and page.count < max_regular_second_level_entries) { page.kind = .regular; - page.count = @intCast(u16, @min( + page.count = @as(u16, @intCast(@min( max_regular_second_level_entries, info.records.items.len - page.start, - )); + ))); i = page.start + page.count; } else { page.kind = .compressed; @@ -479,11 +479,11 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { } // Save indices of records requiring LSDA relocation - try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len)); + try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(info.records.items.len))); for (info.records.items, 0..) 
|rec, i| { - info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len)); + info.lsdas_lookup.putAssumeCapacityNoClobber(@as(RecordIndex, @intCast(i)), @as(u32, @intCast(info.lsdas.items.len))); if (rec.lsda == 0) continue; - try info.lsdas.append(info.gpa, @intCast(RecordIndex, i)); + try info.lsdas.append(info.gpa, @as(RecordIndex, @intCast(i))); } } @@ -506,7 +506,7 @@ fn collectPersonalityFromDwarf( if (cie.getPersonalityPointerReloc( zld, - @intCast(u32, object_id), + @as(u32, @intCast(object_id)), cie_offset, )) |target| { const personality_index = info.getPersonalityFunction(target) orelse inner: { @@ -532,8 +532,8 @@ fn calcRequiredSize(info: UnwindInfo) usize { var total_size: usize = 0; total_size += @sizeOf(macho.unwind_info_section_header); total_size += - @intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t); - total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32); + @as(usize, @intCast(info.common_encodings_count)) * @sizeOf(macho.compact_unwind_encoding_t); + total_size += @as(usize, @intCast(info.personalities_count)) * @sizeOf(u32); total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry); total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry); total_size += info.pages.items.len * second_level_page_bytes; @@ -557,7 +557,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { const atom_index = zld.getGotAtomIndexForSymbol(target).?; const atom = zld.getAtom(atom_index); const sym = zld.getSymbol(atom.getSymbolWithLoc()); - personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr); + personalities[i] = @as(u32, @intCast(sym.n_value - seg.vmaddr)); log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) }); } @@ -570,7 +570,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { } if (rec.compactUnwindEncoding > 0 and 
!UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) { - const lsda_target = @bitCast(SymbolWithLoc, rec.lsda); + const lsda_target = @as(SymbolWithLoc, @bitCast(rec.lsda)); if (lsda_target.getFile()) |_| { const sym = zld.getSymbol(lsda_target); rec.lsda = sym.n_value - seg.vmaddr; @@ -601,7 +601,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32); const personalities_count: u32 = info.personalities_count; const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32); - const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1); + const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1)); try writer.writeStruct(macho.unwind_info_section_header{ .commonEncodingsArraySectionOffset = common_encodings_offset, @@ -615,34 +615,34 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void { try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count])); try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count])); - const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes)); - const lsda_base_offset = @intCast(u32, pages_base_offset - - (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry))); + const pages_base_offset = @as(u32, @intCast(size - (info.pages.items.len * second_level_page_bytes))); + const lsda_base_offset = @as(u32, @intCast(pages_base_offset - + (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)))); for (info.pages.items, 0..) 
|page, i| { assert(page.count > 0); const first_entry = info.records.items[page.start]; try writer.writeStruct(macho.unwind_info_section_header_index_entry{ - .functionOffset = @intCast(u32, first_entry.rangeStart), - .secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes), + .functionOffset = @as(u32, @intCast(first_entry.rangeStart)), + .secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)), .lsdaIndexArraySectionOffset = lsda_base_offset + info.lsdas_lookup.get(page.start).? * @sizeOf(macho.unwind_info_section_header_lsda_index_entry), }); } const last_entry = info.records.items[info.records.items.len - 1]; - const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength); + const sentinel_address = @as(u32, @intCast(last_entry.rangeStart + last_entry.rangeLength)); try writer.writeStruct(macho.unwind_info_section_header_index_entry{ .functionOffset = sentinel_address, .secondLevelPagesSectionOffset = 0, .lsdaIndexArraySectionOffset = lsda_base_offset + - @intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry), + @as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry), }); for (info.lsdas.items) |record_id| { const record = info.records.items[record_id]; try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{ - .functionOffset = @intCast(u32, record.rangeStart), - .lsdaOffset = @intCast(u32, record.lsda), + .functionOffset = @as(u32, @intCast(record.rangeStart)), + .lsdaOffset = @as(u32, @intCast(record.lsda)), }); } @@ -674,7 +674,7 @@ fn getRelocs(zld: *Zld, object_id: u32, record_id: usize) []const macho.relocati } fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool { - const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)); + const base_offset = @as(i32, @intCast(record_id * 
@sizeOf(macho.compact_unwind_entry))); const rel_offset = rel.r_address - base_offset; return rel_offset == 16; } @@ -703,7 +703,7 @@ fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 { } fn isLsda(record_id: usize, rel: macho.relocation_info) bool { - const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)); + const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))); const rel_offset = rel.r_address - base_offset; return rel_offset == 24; } @@ -754,45 +754,45 @@ fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7 pub const UnwindEncoding = struct { pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 { comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK); - return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24); + return @as(u4, @truncate((enc & macho.UNWIND_ARM64_MODE_MASK) >> 24)); } pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool { const mode = getMode(enc); return switch (cpu_arch) { - .aarch64 => @enumFromInt(macho.UNWIND_ARM64_MODE, mode) == .DWARF, - .x86_64 => @enumFromInt(macho.UNWIND_X86_64_MODE, mode) == .DWARF, + .aarch64 => @as(macho.UNWIND_ARM64_MODE, @enumFromInt(mode)) == .DWARF, + .x86_64 => @as(macho.UNWIND_X86_64_MODE, @enumFromInt(mode)) == .DWARF, else => unreachable, }; } pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void { - enc.* |= @intCast(u32, @intFromEnum(mode)) << 24; + enc.* |= @as(u32, @intCast(@intFromEnum(mode))) << 24; } pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool { - const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31); + const has_lsda = @as(u1, @truncate((enc & macho.UNWIND_HAS_LSDA) >> 31)); return has_lsda == 1; } pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void { - const mask = @intCast(u32, @intFromBool(has_lsda)) << 31; + const mask = @as(u32, 
@intCast(@intFromBool(has_lsda))) << 31; enc.* |= mask; } pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 { - const index = @truncate(u2, (enc & macho.UNWIND_PERSONALITY_MASK) >> 28); + const index = @as(u2, @truncate((enc & macho.UNWIND_PERSONALITY_MASK) >> 28)); return index; } pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void { - const mask = @intCast(u32, index) << 28; + const mask = @as(u32, @intCast(index)) << 28; enc.* |= mask; } pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 { assert(isDwarf(enc, cpu_arch)); - const offset = @truncate(u24, enc); + const offset = @as(u24, @truncate(enc)); return offset; } diff --git a/src/link/MachO/ZldAtom.zig b/src/link/MachO/ZldAtom.zig index 55a6325a5ad5..613f0fc86c29 100644 --- a/src/link/MachO/ZldAtom.zig +++ b/src/link/MachO/ZldAtom.zig @@ -117,8 +117,8 @@ pub fn getSectionAlias(zld: *Zld, atom_index: AtomIndex) ?SymbolWithLoc { assert(atom.getFile() != null); const object = zld.objects.items[atom.getFile().?]; - const nbase = @intCast(u32, object.in_symtab.?.len); - const ntotal = @intCast(u32, object.symtab.len); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const ntotal = @as(u32, @intCast(object.symtab.len)); var sym_index: u32 = nbase; while (sym_index < ntotal) : (sym_index += 1) { if (object.getAtomIndexForSymbol(sym_index)) |other_atom_index| { @@ -144,8 +144,8 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u const base_addr = if (object.getSourceSymbol(atom.sym_index)) |sym| sym.n_value else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); break :blk source_sect.addr; }; @@ -177,15 +177,15 @@ pub fn 
getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext { if (object.getSourceSymbol(atom.sym_index)) |source_sym| { const source_sect = object.getSourceSection(source_sym.n_sect - 1); return .{ - .base_addr = @intCast(i64, source_sect.addr), - .base_offset = @intCast(i32, source_sym.n_value - source_sect.addr), + .base_addr = @as(i64, @intCast(source_sect.addr)), + .base_offset = @as(i32, @intCast(source_sym.n_value - source_sect.addr)), }; } - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); return .{ - .base_addr = @intCast(i64, source_sect.addr), + .base_addr = @as(i64, @intCast(source_sect.addr)), .base_offset = 0, }; } @@ -204,8 +204,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name }); const sym_index = if (ctx.rel.r_extern == 0) sym_index: { - const sect_id = @intCast(u8, ctx.rel.r_symbolnum - 1); - const rel_offset = @intCast(u32, ctx.rel.r_address - ctx.base_offset); + const sect_id = @as(u8, @intCast(ctx.rel.r_symbolnum - 1)); + const rel_offset = @as(u32, @intCast(ctx.rel.r_address - ctx.base_offset)); const address_in_section = if (ctx.rel.r_pcrel == 0) blk: { break :blk if (ctx.rel.r_length == 3) @@ -214,7 +214,7 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { mem.readIntLittle(u32, ctx.code[rel_offset..][0..4]); } else blk: { assert(zld.options.target.cpu.arch == .x86_64); - const correction: u3 = switch (@enumFromInt(macho.reloc_type_x86_64, ctx.rel.r_type)) { + const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) { .X86_64_RELOC_SIGNED => 0, .X86_64_RELOC_SIGNED_1 => 1, .X86_64_RELOC_SIGNED_2 => 2, @@ -222,8 +222,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct { else => 
unreachable, }; const addend = mem.readIntLittle(i32, ctx.code[rel_offset..][0..4]); - const target_address = @intCast(i64, ctx.base_addr) + ctx.rel.r_address + 4 + correction + addend; - break :blk @intCast(u64, target_address); + const target_address = @as(i64, @intCast(ctx.base_addr)) + ctx.rel.r_address + 4 + correction + addend; + break :blk @as(u64, @intCast(target_address)); }; // Find containing atom @@ -272,7 +272,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: boo fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void { for (relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_ADDEND, .ARM64_RELOC_SUBTRACTOR => continue, @@ -318,7 +318,7 @@ fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) cons fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void { for (relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_SUBTRACTOR => continue, @@ -364,7 +364,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void { const gpa = zld.gpa; const atom_index = try zld.createTlvPtrAtom(); - const tlv_ptr_index = @intCast(u32, zld.tlv_ptr_entries.items.len); + const tlv_ptr_index = @as(u32, @intCast(zld.tlv_ptr_entries.items.len)); try zld.tlv_ptr_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -376,7 +376,7 @@ pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void { if (zld.got_table.contains(target)) return; const gpa = zld.gpa; const atom_index = try zld.createGotAtom(); - const got_index = @intCast(u32, zld.got_entries.items.len); + const got_index = @as(u32, @intCast(zld.got_entries.items.len)); 
try zld.got_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -393,7 +393,7 @@ pub fn addStub(zld: *Zld, target: SymbolWithLoc) !void { _ = try zld.createStubHelperAtom(); _ = try zld.createLazyPointerAtom(); const atom_index = try zld.createStubAtom(); - const stubs_index = @intCast(u32, zld.stubs.items.len); + const stubs_index = @as(u32, @intCast(zld.stubs.items.len)); try zld.stubs.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -489,7 +489,7 @@ fn resolveRelocsArm64( var subtractor: ?SymbolWithLoc = null; for (atom_relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_ADDEND => { @@ -529,7 +529,7 @@ fn resolveRelocsArm64( .base_addr = context.base_addr, .base_offset = context.base_offset, }); - const rel_offset = @intCast(u32, rel.r_address - context.base_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset)); log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{ @tagName(rel_type), @@ -590,7 +590,7 @@ fn resolveRelocsArm64( aarch64.Instruction.unconditional_branch_immediate, ), code), }; - inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2)); + inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2)))); mem.writeIntLittle(u32, code, inst.toU32()); }, @@ -598,11 +598,11 @@ fn resolveRelocsArm64( .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_TLVP_LOAD_PAGE21, => { - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); - const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr)); + const pages = @as(u21, 
@bitCast(Relocation.calcNumberOfPages(source_addr, adjusted_target_addr))); const code = atom_code[rel_offset..][0..4]; var inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload( @@ -610,14 +610,14 @@ fn resolveRelocsArm64( aarch64.Instruction.pc_relative_address, ), code), }; - inst.pc_relative_address.immhi = @truncate(u19, pages >> 2); - inst.pc_relative_address.immlo = @truncate(u2, pages); + inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); + inst.pc_relative_address.immlo = @as(u2, @truncate(pages)); mem.writeIntLittle(u32, code, inst.toU32()); addend = null; }, .ARM64_RELOC_PAGEOFF12 => { - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -656,7 +656,7 @@ fn resolveRelocsArm64( .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => { const code = atom_code[rel_offset..][0..4]; - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -674,7 +674,7 @@ fn resolveRelocsArm64( .ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => { const code = atom_code[rel_offset..][0..4]; - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0)); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0))); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -725,7 +725,7 @@ fn resolveRelocsArm64( .sh = 0, .s = 0, .op = 0, - .sf = @truncate(u1, reg_info.size), + .sf = @as(u1, @truncate(reg_info.size)), }, }; mem.writeIntLittle(u32, code, inst.toU32()); @@ -734,9 +734,9 @@ fn resolveRelocsArm64( .ARM64_RELOC_POINTER_TO_GOT => { log.debug(" | target_addr = 0x{x}", 
.{target_addr}); - const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse + const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse return error.Overflow; - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @bitCast(u32, result)); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @bitCast(result))); }, .ARM64_RELOC_UNSIGNED => { @@ -747,7 +747,7 @@ fn resolveRelocsArm64( if (rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; ptr_addend -= base_addr; @@ -756,17 +756,17 @@ fn resolveRelocsArm64( const result = blk: { if (subtractor) |sub| { const sym = zld.getSymbol(sub); - break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + ptr_addend; + break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + ptr_addend; } else { - break :blk @intCast(i64, target_addr) + ptr_addend; + break :blk @as(i64, @intCast(target_addr)) + ptr_addend; } }; log.debug(" | target_addr = 0x{x}", .{result}); if (rel.r_length == 3) { - mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result)); + mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result))); } else { - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result))); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result))))); } subtractor = null; @@ -791,7 +791,7 @@ fn resolveRelocsX86( var subtractor: ?SymbolWithLoc = null; for (atom_relocs) |rel| { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch 
(rel_type) { .X86_64_RELOC_SUBTRACTOR => { @@ -823,7 +823,7 @@ fn resolveRelocsX86( .base_addr = context.base_addr, .base_offset = context.base_offset, }); - const rel_offset = @intCast(u32, rel.r_address - context.base_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset)); log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{ @tagName(rel_type), @@ -851,7 +851,7 @@ fn resolveRelocsX86( switch (rel_type) { .X86_64_RELOC_BRANCH => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp); @@ -861,7 +861,7 @@ fn resolveRelocsX86( .X86_64_RELOC_GOT_LOAD, => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp); @@ -869,7 +869,7 @@ fn resolveRelocsX86( .X86_64_RELOC_TLV => { const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); @@ -897,14 +897,14 @@ fn resolveRelocsX86( if 
(rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; - addend += @intCast(i32, @intCast(i64, context.base_addr) + rel.r_address + 4 - - @intCast(i64, base_addr)); + addend += @as(i32, @intCast(@as(i64, @intCast(context.base_addr)) + rel.r_address + 4 - + @as(i64, @intCast(base_addr)))); } - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); log.debug(" | target_addr = 0x{x}", .{adjusted_target_addr}); @@ -920,7 +920,7 @@ fn resolveRelocsX86( if (rel.r_extern == 0) { const base_addr = if (target.sym_index >= object.source_address_lookup.len) - @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr) + @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr)) else object.source_address_lookup[target.sym_index]; addend -= base_addr; @@ -929,17 +929,17 @@ fn resolveRelocsX86( const result = blk: { if (subtractor) |sub| { const sym = zld.getSymbol(sub); - break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + addend; + break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + addend; } else { - break :blk @intCast(i64, target_addr) + addend; + break :blk @as(i64, @intCast(target_addr)) + addend; } }; log.debug(" | target_addr = 0x{x}", .{result}); if (rel.r_length == 3) { - mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result)); + mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result))); } else { - mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result))); + mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], 
@as(u32, @truncate(@as(u64, @bitCast(result))))); } subtractor = null; @@ -958,19 +958,19 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 { // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); const source_sect = object.getSourceSection(sect_id); assert(!source_sect.isZerofill()); const code = object.getSectionContents(source_sect); - const code_len = @intCast(usize, atom.size); + const code_len = @as(usize, @intCast(atom.size)); return code[0..code_len]; }; const source_sect = object.getSourceSection(source_sym.n_sect - 1); assert(!source_sect.isZerofill()); const code = object.getSectionContents(source_sect); - const offset = @intCast(usize, source_sym.n_value - source_sect.addr); - const code_len = @intCast(usize, atom.size); + const offset = @as(usize, @intCast(source_sym.n_value - source_sect.addr)); + const code_len = @as(usize, @intCast(atom.size)); return code[offset..][0..code_len]; } @@ -986,8 +986,8 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_ // If there was no matching symbol present in the source symtab, this means // we are dealing with either an entire section, or part of it, but also // starting at the beginning. 
- const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = object.getSourceSection(source_sect_id); @@ -998,14 +998,14 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_ pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool { switch (zld.options.target.cpu.arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12, .ARM64_RELOC_POINTER_TO_GOT, => return true, else => return false, }, - .x86_64 => switch (@enumFromInt(macho.reloc_type_x86_64, rel.r_type)) { + .x86_64 => switch (@as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type))) { .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD, => return true, diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig index b2c569447d20..890b40ed854e 100644 --- a/src/link/MachO/dead_strip.zig +++ b/src/link/MachO/dead_strip.zig @@ -27,10 +27,10 @@ pub fn gcAtoms(zld: *Zld, resolver: *const SymbolResolver) !void { defer arena.deinit(); var roots = AtomTable.init(arena.allocator()); - try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len)); + try roots.ensureUnusedCapacity(@as(u32, @intCast(zld.globals.items.len))); var alive = AtomTable.init(arena.allocator()); - try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len)); + try alive.ensureTotalCapacity(@as(u32, @intCast(zld.atoms.items.len))); try collectRoots(zld, &roots, resolver); try mark(zld, roots, &alive); @@ -99,8 +99,8 @@ fn collectRoots(zld: *Zld, roots: *AtomTable, resolver: *const SymbolResolver) ! 
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_sect - 1 else sect_id: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :sect_id sect_id; }; const source_sect = object.getSourceSection(sect_id); @@ -148,7 +148,7 @@ fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void { for (relocs) |rel| { const target = switch (cpu_arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(zld, .{ .object_id = atom.getFile().?, @@ -208,7 +208,7 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool { for (relocs) |rel| { const target = switch (cpu_arch) { - .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) { + .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(zld, .{ .object_id = atom.getFile().?, @@ -264,8 +264,8 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void { const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_sect - 1 else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk sect_id; }; const source_sect = object.getSourceSection(sect_id); @@ -283,7 +283,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void { for (zld.objects.items, 0..) |_, object_id| { // Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so, // marking all references as live. 
- try markUnwindRecords(zld, @intCast(u32, object_id), alive); + try markUnwindRecords(zld, @as(u32, @intCast(object_id)), alive); } } @@ -329,7 +329,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const target_sym = zld.getSymbol(target); if (!target_sym.undf()) { @@ -344,7 +344,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void { .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), - .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)), + .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); const target_object = zld.objects.items[target.getFile().?]; const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?; @@ -377,7 +377,7 @@ fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *A .object_id = object_id, .rel = rel, .code = fde.data, - .base_offset = @intCast(i32, fde_offset) + 4, + .base_offset = @as(i32, @intCast(fde_offset)) + 4, }); const target_sym = zld.getSymbol(target); if (!target_sym.undf()) blk: { diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig index 5b386a81368b..0f3e96b02f72 100644 --- a/src/link/MachO/dyld_info/Rebase.zig +++ b/src/link/MachO/dyld_info/Rebase.zig @@ -31,7 +31,7 @@ pub fn deinit(rebase: *Rebase, gpa: Allocator) void { } pub fn size(rebase: Rebase) u64 { - return @intCast(u64, rebase.buffer.items.len); + return @as(u64, @intCast(rebase.buffer.items.len)); } pub fn finalize(rebase: *Rebase, gpa: Allocator) !void { @@ -145,12 +145,12 @@ fn finalizeSegment(entries: []const Entry, writer: anytype) !void { fn setTypePointer(writer: anytype) !void { log.debug(">>> set type: {d}", 
.{macho.REBASE_TYPE_POINTER}); - try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER)); + try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER))); } fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void { log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset }); - try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id)); + try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id))); try std.leb.writeULEB128(writer, offset); } @@ -163,7 +163,7 @@ fn rebaseAddAddr(addr: u64, writer: anytype) !void { fn rebaseTimes(count: usize, writer: anytype) !void { log.debug(">>> rebase with count: {d}", .{count}); if (count <= 0xf) { - try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, count)); + try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count))); } else { try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES); try std.leb.writeULEB128(writer, count); @@ -182,7 +182,7 @@ fn addAddr(addr: u64, writer: anytype) !void { if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) { const imm = @divExact(addr, @sizeOf(u64)); if (imm <= 0xf) { - try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @truncate(u4, imm)); + try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm))); return; } } diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig index 14ce1587aa79..f804c6466d2f 100644 --- a/src/link/MachO/dyld_info/bind.zig +++ b/src/link/MachO/dyld_info/bind.zig @@ -39,7 +39,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { } pub fn size(self: Self) u64 { - return @intCast(u64, self.buffer.items.len); + return @as(u64, @intCast(self.buffer.items.len)); } pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void { @@ -95,7 
+95,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { const sym = ctx.getSymbol(current.target); const name = ctx.getSymbolName(current.target); const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0; - const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); try setSymbol(name, flags, writer); try setTypePointer(writer); @@ -112,7 +112,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { switch (state) { .start => { if (current.offset < offset) { - try addAddr(@bitCast(u64, @intCast(i64, current.offset) - @intCast(i64, offset)), writer); + try addAddr(@as(u64, @bitCast(@as(i64, @intCast(current.offset)) - @as(i64, @intCast(offset)))), writer); offset = offset - (offset - current.offset); } else if (current.offset > offset) { const delta = current.offset - offset; @@ -130,7 +130,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { } else if (current.offset > offset) { const delta = current.offset - offset; state = .bind_times_skip; - skip = @intCast(u64, delta); + skip = @as(u64, @intCast(delta)); offset += skip; } else unreachable; i -= 1; @@ -194,7 +194,7 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { } pub fn size(self: Self) u64 { - return @intCast(u64, self.buffer.items.len); + return @as(u64, @intCast(self.buffer.items.len)); } pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void { @@ -208,12 +208,12 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { var addend: i64 = 0; for (self.entries.items) |entry| { - self.offsets.appendAssumeCapacity(@intCast(u32, cwriter.bytes_written)); + self.offsets.appendAssumeCapacity(@as(u32, @intCast(cwriter.bytes_written))); const sym = ctx.getSymbol(entry.target); const name = ctx.getSymbolName(entry.target); const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0; - const ordinal 
= @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER); + const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER); try setSegmentOffset(entry.segment_id, entry.offset, writer); try setSymbol(name, flags, writer); @@ -238,20 +238,20 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type { fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void { log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset }); - try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id)); + try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id))); try std.leb.writeULEB128(writer, offset); } fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void { log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags }); - try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @truncate(u4, flags)); + try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags))); try writer.writeAll(name); try writer.writeByte(0); } fn setTypePointer(writer: anytype) !void { log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER}); - try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER)); + try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER))); } fn setDylibOrdinal(ordinal: i16, writer: anytype) !void { @@ -264,13 +264,13 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void { else => unreachable, // Invalid dylib special binding } log.debug(">>> set dylib special: {d}", .{ordinal}); - const cast = @bitCast(u16, ordinal); - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, cast)); + const cast = @as(u16, @bitCast(ordinal)); + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @as(u4, @truncate(cast))); } else { - const cast = @bitCast(u16, ordinal); + const cast = 
@as(u16, @bitCast(ordinal)); log.debug(">>> set dylib ordinal: {d}", .{ordinal}); if (cast <= 0xf) { - try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, cast)); + try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast))); } else { try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); try std.leb.writeULEB128(writer, cast); @@ -295,7 +295,7 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void { const imm = @divExact(addr, @sizeOf(u64)); if (imm <= 0xf) { try writer.writeByte( - macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @truncate(u4, imm), + macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)), ); return; } @@ -341,7 +341,7 @@ const TestContext = struct { fn addSymbol(ctx: *TestContext, gpa: Allocator, name: []const u8, ordinal: i16, flags: u16) !void { const n_strx = try ctx.addString(gpa, name); - var n_desc = @bitCast(u16, ordinal * macho.N_SYMBOL_RESOLVER); + var n_desc = @as(u16, @bitCast(ordinal * macho.N_SYMBOL_RESOLVER)); n_desc |= flags; try ctx.symbols.append(gpa, .{ .n_value = 0, @@ -353,7 +353,7 @@ const TestContext = struct { } fn addString(ctx: *TestContext, gpa: Allocator, name: []const u8) !u32 { - const n_strx = @intCast(u32, ctx.strtab.items.len); + const n_strx = @as(u32, @intCast(ctx.strtab.items.len)); try ctx.strtab.appendSlice(gpa, name); try ctx.strtab.append(gpa, 0); return n_strx; @@ -366,7 +366,7 @@ const TestContext = struct { fn getSymbolName(ctx: TestContext, target: Target) []const u8 { const sym = ctx.getSymbol(target); assert(sym.n_strx < ctx.strtab.items.len); - return std.mem.sliceTo(@ptrCast([*:0]const u8, ctx.strtab.items.ptr + sym.n_strx), 0); + return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + sym.n_strx)), 0); } }; diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig index 1672e372297c..eb4419cd7b8c 100644 --- a/src/link/MachO/eh_frame.zig +++ b/src/link/MachO/eh_frame.zig @@ -36,7 +36,7 @@ pub 
fn scanRelocs(zld: *Zld) !void { try cies.putNoClobber(cie_offset, {}); it.seekTo(cie_offset); const cie = (try it.next()).?; - try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset); + try cie.scanRelocs(zld, @as(u32, @intCast(object_id)), cie_offset); } } } @@ -110,7 +110,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { var eh_frame_offset: u32 = 0; for (zld.objects.items, 0..) |*object, object_id| { - try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len)); + try eh_records.ensureUnusedCapacity(2 * @as(u32, @intCast(object.exec_atoms.items.len))); var cies = std.AutoHashMap(u32, u32).init(gpa); defer cies.deinit(); @@ -139,7 +139,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { eh_it.seekTo(cie_offset); const source_cie_record = (try eh_it.next()).?; var cie_record = try source_cie_record.toOwned(gpa); - try cie_record.relocate(zld, @intCast(u32, object_id), .{ + try cie_record.relocate(zld, @as(u32, @intCast(object_id)), .{ .source_offset = cie_offset, .out_offset = eh_frame_offset, .sect_addr = sect.addr, @@ -151,7 +151,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { var fde_record = try source_fde_record.toOwned(gpa); fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*); - try fde_record.relocate(zld, @intCast(u32, object_id), .{ + try fde_record.relocate(zld, @as(u32, @intCast(object_id)), .{ .source_offset = fde_record_offset, .out_offset = eh_frame_offset, .sect_addr = sect.addr, @@ -194,7 +194,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void { UnwindInfo.UnwindEncoding.setDwarfSectionOffset( &record.compactUnwindEncoding, cpu_arch, - @intCast(u24, eh_frame_offset), + @as(u24, @intCast(eh_frame_offset)), ); const cie_record = eh_records.get( @@ -268,7 +268,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { }) u64 { assert(rec.tag == .fde); const addend = mem.readIntLittle(i64, rec.data[4..][0..8]); - return @intCast(u64, @intCast(i64, ctx.base_addr 
+ ctx.base_offset + 8) + addend); + return @as(u64, @intCast(@as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)) + addend)); } pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct { @@ -276,7 +276,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { base_offset: u64, }) !void { assert(rec.tag == .fde); - const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8); + const addend = @as(i64, @intCast(value)) - @as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)); mem.writeIntLittle(i64, rec.data[4..][0..8], addend); } @@ -291,7 +291,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_SUBTRACTOR, .ARM64_RELOC_UNSIGNED, @@ -301,7 +301,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { } }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_GOT => {}, else => unreachable, @@ -313,7 +313,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { .object_id = object_id, .rel = rel, .code = rec.data, - .base_offset = @intCast(i32, source_offset) + 4, + .base_offset = @as(i32, @intCast(source_offset)) + 4, }); return target; } @@ -335,40 +335,40 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { .object_id = object_id, .rel = rel, .code = rec.data, - .base_offset = @intCast(i32, ctx.source_offset) + 4, + .base_offset = @as(i32, @intCast(ctx.source_offset)) + 4, }); - const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4); + const rel_offset = @as(u32, @intCast(rel.r_address - @as(i32, @intCast(ctx.source_offset)) - 4)); const source_addr = ctx.sect_addr + rel_offset + ctx.out_offset + 4; switch 
(cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); switch (rel_type) { .ARM64_RELOC_SUBTRACTOR => { // Address of the __eh_frame in the source object file }, .ARM64_RELOC_POINTER_TO_GOT => { const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false); - const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse + const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse return error.Overflow; mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result); }, .ARM64_RELOC_UNSIGNED => { assert(rel.r_extern == 1); const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false); - const result = @intCast(i64, target_addr) - @intCast(i64, source_addr); - mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result)); + const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)); + mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result))); }, else => unreachable, } }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); switch (rel_type) { .X86_64_RELOC_GOT => { const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false); const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]); - const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend); + const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend)); const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0); mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp); }, @@ -392,7 +392,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { pub fn getAugmentationString(rec: Record) []const u8 { assert(rec.tag == 
.cie); - return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(rec.data.ptr + 5)), 0); } pub fn getPersonalityPointer(rec: Record, ctx: struct { @@ -418,7 +418,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { 'P' => { const enc = try reader.readByte(); const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read; - const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader); + const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader); return ptr; }, 'L' => { @@ -441,7 +441,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { const reader = stream.reader(); _ = try reader.readByte(); const offset = ctx.base_offset + 25; - const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader); + const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader); return ptr; } @@ -454,7 +454,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { var stream = std.io.fixedBufferStream(rec.data[21..]); const writer = stream.writer(); const offset = ctx.base_offset + 25; - try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer); + try setEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), value, writer); } fn getLsdaEncoding(rec: Record) !?u8 { @@ -494,11 +494,11 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { if (enc == EH_PE.omit) return null; var ptr: i64 = switch (enc & 0x0F) { - EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)), - EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)), - EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)), - EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)), - EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)), + EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))), + EH_PE.udata2 => @as(i16, @bitCast(try reader.readIntLittle(u16))), + 
EH_PE.udata4 => @as(i32, @bitCast(try reader.readIntLittle(u32))), + EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))), + EH_PE.uleb128 => @as(i64, @bitCast(try leb.readULEB128(u64, reader))), EH_PE.sdata2 => try reader.readIntLittle(i16), EH_PE.sdata4 => try reader.readIntLittle(i32), EH_PE.sdata8 => try reader.readIntLittle(i64), @@ -517,13 +517,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { else => return null, } - return @bitCast(u64, ptr); + return @as(u64, @bitCast(ptr)); } fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void { if (enc == EH_PE.omit) return; - var actual = @intCast(i64, value); + var actual = @as(i64, @intCast(value)); switch (enc & 0x70) { EH_PE.absptr => {}, @@ -537,13 +537,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type { } switch (enc & 0x0F) { - EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)), - EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))), - EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))), - EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)), - EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)), - EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)), - EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)), + EH_PE.absptr => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))), + EH_PE.udata2 => try writer.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(actual))))), + EH_PE.udata4 => try writer.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(actual))))), + EH_PE.udata8 => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))), + EH_PE.uleb128 => try leb.writeULEB128(writer, @as(u64, @bitCast(actual))), + EH_PE.sdata2 => try writer.writeIntLittle(i16, @as(i16, @intCast(actual))), + EH_PE.sdata4 => try writer.writeIntLittle(i32, @as(i32, @intCast(actual))), EH_PE.sdata8 => try 
writer.writeIntLittle(i64, actual), EH_PE.sleb128 => try leb.writeILEB128(writer, actual), else => unreachable, diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index eb582e222285..10f446f19159 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -114,7 +114,7 @@ fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx } } - return @intCast(u32, sizeofcmds); + return @as(u32, @intCast(sizeofcmds)); } pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 { @@ -140,7 +140,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { var pos: usize = 0; while (true) { if (pos >= lc_buffer.len) break; - const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*; + const cmd = @as(*align(1) const macho.load_command, @ptrCast(lc_buffer.ptr + pos)).*; ncmds += 1; pos += cmd.cmdsize; } @@ -149,11 +149,11 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.dylinker_command{ .cmd = .LOAD_DYLINKER, .cmdsize = cmdsize, @@ -176,11 +176,11 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.dylib_command{ .cmd = ctx.cmd, .cmdsize = cmdsize, @@ -217,8 +217,8 @@ pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: a try writeDylibLC(.{ .cmd = .ID_DYLIB, .name = install_name, - .current_version = @intCast(u32, 
curr.major << 16 | curr.minor << 8 | curr.patch), - .compatibility_version = @intCast(u32, compat.major << 16 | compat.minor << 8 | compat.patch), + .current_version = @as(u32, @intCast(curr.major << 16 | curr.minor << 8 | curr.patch)), + .compatibility_version = @as(u32, @intCast(compat.major << 16 | compat.minor << 8 | compat.patch)), }, lc_writer); } @@ -253,11 +253,11 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const cmdsize = @intCast(u32, mem.alignForward( + const cmdsize = @as(u32, @intCast(mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), - )); + ))); try lc_writer.writeStruct(macho.rpath_command{ .cmdsize = cmdsize, .path = @sizeOf(macho.rpath_command), @@ -275,12 +275,12 @@ pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !vo const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version); const platform_version = blk: { const ver = options.target.os.version_range.semver.min; - const platform_version = @intCast(u32, ver.major << 16 | ver.minor << 8); + const platform_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8)); break :blk platform_version; }; const sdk_version = if (options.native_darwin_sdk) |sdk| blk: { const ver = sdk.version; - const sdk_version = @intCast(u32, ver.major << 16 | ver.minor << 8); + const sdk_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8)); break :blk sdk_version; } else platform_version; const is_simulator_abi = options.target.abi == .simulator; diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index f3289e544b81..82d045122598 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -131,7 +131,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { log.debug("GROUP END at {d}", .{group_end}); // Insert thunk at group_end - const thunk_index = @intCast(u32, zld.thunks.items.len); + 
const thunk_index = @as(u32, @intCast(zld.thunks.items.len)); try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 }); // Scan relocs in the group and create trampolines for any unreachable callsite. @@ -174,7 +174,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } } - header.size = @intCast(u32, offset); + header.size = @as(u32, @intCast(offset)); } fn allocateThunk( @@ -223,7 +223,7 @@ fn scanRelocs( const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: { const source_sect = object.getSourceSection(source_sym.n_sect - 1); - break :blk @intCast(i32, source_sym.n_value - source_sect.addr); + break :blk @as(i32, @intCast(source_sym.n_value - source_sect.addr)); } else 0; const code = Atom.getAtomCode(zld, atom_index); @@ -289,7 +289,7 @@ fn scanRelocs( } inline fn relocNeedsThunk(rel: macho.relocation_info) bool { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); return rel_type == .ARM64_RELOC_BRANCH26; } @@ -315,7 +315,7 @@ fn isReachable( if (!allocated.contains(target_atom_index)) return false; - const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset); + const source_addr = source_sym.n_value + @as(u32, @intCast(rel.r_address - base_offset)); const is_via_got = Atom.relocRequiresGot(zld, rel); const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable; _ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch @@ -349,7 +349,7 @@ fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex { const end_addr = start_addr + thunk.getSize(); if (start_addr <= sym.n_value and sym.n_value < end_addr) { - return @intCast(u32, i); + return @as(u32, @intCast(i)); } } return null; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 13c1ea73fa30..3e828984a96d 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -103,7 
+103,7 @@ pub const Zld = struct { const cpu_arch = self.options.target.cpu.arch; const mtime: u64 = mtime: { const stat = file.stat() catch break :mtime 0; - break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000)); + break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000))); }; const file_stat = try file.stat(); const file_size = math.cast(usize, file_stat.size) orelse return error.Overflow; @@ -220,7 +220,7 @@ pub const Zld = struct { const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null); defer gpa.free(contents); - const dylib_id = @intCast(u16, self.dylibs.items.len); + const dylib_id = @as(u16, @intCast(self.dylibs.items.len)); var dylib = Dylib{ .weak = opts.weak }; dylib.parseFromBinary( @@ -535,7 +535,7 @@ pub const Zld = struct { pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !AtomIndex { const gpa = self.gpa; - const index = @intCast(AtomIndex, self.atoms.items.len); + const index = @as(AtomIndex, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); atom.* = Atom.empty; atom.sym_index = sym_index; @@ -596,7 +596,7 @@ pub const Zld = struct { const global_index = self.dyld_stub_binder_index orelse return; const target = self.globals.items[global_index]; const atom_index = try self.createGotAtom(); - const got_index = @intCast(u32, self.got_entries.items.len); + const got_index = @as(u32, @intCast(self.got_entries.items.len)); try self.got_entries.append(gpa, .{ .target = target, .atom_index = atom_index, @@ -874,7 +874,7 @@ pub const Zld = struct { } for (self.objects.items, 0..) 
|_, object_id| { - try self.resolveSymbolsInObject(@intCast(u32, object_id), resolver); + try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)), resolver); } try self.resolveSymbolsInArchives(resolver); @@ -1024,7 +1024,7 @@ pub const Zld = struct { }; assert(offsets.items.len > 0); - const object_id = @intCast(u16, self.objects.items.len); + const object_id = @as(u16, @intCast(self.objects.items.len)); const object = archive.parseObject(gpa, cpu_arch, offsets.items[0]) catch |e| switch (e) { error.MismatchedCpuArchitecture => { log.err("CPU architecture mismatch found in {s}", .{archive.name}); @@ -1055,14 +1055,14 @@ pub const Zld = struct { for (self.dylibs.items, 0..) |dylib, id| { if (!dylib.symbols.contains(sym_name)) continue; - const dylib_id = @intCast(u16, id); + const dylib_id = @as(u16, @intCast(id)); if (!self.referenced_dylibs.contains(dylib_id)) { try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; sym.n_type |= macho.N_EXT; - sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER; + sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER; if (dylib.weak) { sym.n_desc |= macho.N_WEAK_REF; @@ -1099,9 +1099,9 @@ pub const Zld = struct { _ = resolver.unresolved.swapRemove(global_index); continue; } else if (allow_undef) { - const n_desc = @bitCast( + const n_desc = @as( u16, - macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @intCast(i16, macho.N_SYMBOL_RESOLVER), + @bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))), ); sym.n_type = macho.N_EXT; sym.n_desc = n_desc; @@ -1238,7 +1238,7 @@ pub const Zld = struct { const segname = header.segName(); const segment_id = self.getSegmentByName(segname) orelse blk: { log.debug("creating segment '{s}'", .{segname}); - const segment_id = @intCast(u8, self.segments.items.len); + const segment_id = @as(u8, @intCast(self.segments.items.len)); const protection = 
getSegmentMemoryProtection(segname); try self.segments.append(self.gpa, .{ .cmdsize = @sizeOf(macho.segment_command_64), @@ -1269,7 +1269,7 @@ pub const Zld = struct { pub fn allocateSymbol(self: *Zld) !u32 { try self.locals.ensureUnusedCapacity(self.gpa, 1); log.debug(" (allocating symbol index {d})", .{self.locals.items.len}); - const index = @intCast(u32, self.locals.items.len); + const index = @as(u32, @intCast(self.locals.items.len)); _ = self.locals.addOneAssumeCapacity(); self.locals.items[index] = .{ .n_strx = 0, @@ -1282,7 +1282,7 @@ pub const Zld = struct { } fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 { - const global_index = @intCast(u32, self.globals.items.len); + const global_index = @as(u32, @intCast(self.globals.items.len)); try self.globals.append(self.gpa, sym_loc); return global_index; } @@ -1489,7 +1489,7 @@ pub const Zld = struct { if (mem.eql(u8, header.sectName(), "__stub_helper")) continue; // Create jump/branch range extenders if needed. - try thunks.createThunks(self, @intCast(u8, sect_id)); + try thunks.createThunks(self, @as(u8, @intCast(sect_id))); } } } @@ -1502,7 +1502,7 @@ pub const Zld = struct { .dylibs = self.dylibs.items, .referenced_dylibs = self.referenced_dylibs.keys(), }) else 0; - try self.allocateSegment(@intCast(u8, segment_index), base_size); + try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size); } } @@ -1536,12 +1536,12 @@ pub const Zld = struct { for (slice.items(.header)[indexes.start..indexes.end], 0..) 
|*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); const start_aligned = mem.alignForward(u64, start, alignment); - const n_sect = @intCast(u8, indexes.start + sect_id + 1); + const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1)); header.offset = if (header.isZerofill()) 0 else - @intCast(u32, segment.fileoff + start_aligned); + @as(u32, @intCast(segment.fileoff + start_aligned)); header.addr = segment.vmaddr + start_aligned; var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id]; @@ -1617,7 +1617,7 @@ pub const Zld = struct { ) !u8 { const gpa = self.gpa; log.debug("creating section '{s},{s}'", .{ segname, sectname }); - const index = @intCast(u8, self.sections.slice().len); + const index = @as(u8, @intCast(self.sections.slice().len)); try self.sections.append(gpa, .{ .segment_index = undefined, // Segments will be created automatically later down the pipeline .header = .{ @@ -1673,12 +1673,12 @@ pub const Zld = struct { }, } }; - return (@intCast(u8, segment_precedence) << 4) + section_precedence; + return (@as(u8, @intCast(segment_precedence)) << 4) + section_precedence; } fn writeSegmentHeaders(self: *Zld, writer: anytype) !void { for (self.segments.items, 0..) 
|seg, i| { - const indexes = self.getSectionIndexes(@intCast(u8, i)); + const indexes = self.getSectionIndexes(@as(u8, @intCast(i))); var out_seg = seg; out_seg.cmdsize = @sizeOf(macho.segment_command_64); out_seg.nsects = 0; @@ -1790,7 +1790,7 @@ pub const Zld = struct { } const segment_index = slice.items(.segment_index)[sect_id]; - const segment = self.getSegment(@intCast(u8, sect_id)); + const segment = self.getSegment(@as(u8, @intCast(sect_id))); if (segment.maxprot & macho.PROT.WRITE == 0) continue; log.debug("{s},{s}", .{ header.segName(), header.sectName() }); @@ -1820,12 +1820,12 @@ pub const Zld = struct { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); if (rel_type != .ARM64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); if (rel_type != .X86_64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, @@ -1841,9 +1841,9 @@ pub const Zld = struct { const target_sym = self.getSymbol(target); if (target_sym.undf()) continue; - const base_offset = @intCast(i32, sym.n_value - segment.vmaddr); + const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr)); const rel_offset = rel.r_address - ctx.base_offset; - const offset = @intCast(u64, base_offset + rel_offset); + const offset = @as(u64, @intCast(base_offset + rel_offset)); log.debug(" | rebase at {x}", .{offset}); try rebase.entries.append(self.gpa, .{ @@ -1882,7 +1882,7 @@ pub const Zld = struct { const sym = entry.getAtomSymbol(self); const base_offset = sym.n_value - seg.vmaddr; - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" 
| bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -1929,7 +1929,7 @@ pub const Zld = struct { } const segment_index = slice.items(.segment_index)[sect_id]; - const segment = self.getSegment(@intCast(u8, sect_id)); + const segment = self.getSegment(@as(u8, @intCast(sect_id))); if (segment.maxprot & macho.PROT.WRITE == 0) continue; const cpu_arch = self.options.target.cpu.arch; @@ -1959,12 +1959,12 @@ pub const Zld = struct { for (relocs) |rel| { switch (cpu_arch) { .aarch64 => { - const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type); + const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)); if (rel_type != .ARM64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, .x86_64 => { - const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type); + const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type)); if (rel_type != .X86_64_RELOC_UNSIGNED) continue; if (rel.r_length != 3) continue; }, @@ -1983,11 +1983,11 @@ pub const Zld = struct { if (!bind_sym.undf()) continue; const base_offset = sym.n_value - segment.vmaddr; - const rel_offset = @intCast(u32, rel.r_address - ctx.base_offset); - const offset = @intCast(u64, base_offset + rel_offset); + const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset)); + const offset = @as(u64, @intCast(base_offset + rel_offset)); const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]); - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -2039,7 +2039,7 @@ pub const Zld = struct { const stub_entry = self.stubs.items[count]; const bind_sym = stub_entry.getTargetSymbol(self); const bind_sym_name = stub_entry.getTargetSymbolName(self); - const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), 
macho.N_SYMBOL_RESOLVER); + const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER); log.debug(" | lazy bind at {x}, import('{s}') in dylib({d})", .{ base_offset, bind_sym_name, @@ -2165,14 +2165,14 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, rebase_off); try self.populateLazyBindOffsetsInStubHelper(lazy_bind); - self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off); - self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned); - self.dyld_info_cmd.bind_off = @intCast(u32, bind_off); - self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned); - self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off); - self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned); - self.dyld_info_cmd.export_off = @intCast(u32, export_off); - self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned); + self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off)); + self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned)); + self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off)); + self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned)); + self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off)); + self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned)); + self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off)); + self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned)); } fn populateLazyBindOffsetsInStubHelper(self: *Zld, lazy_bind: LazyBind) !void { @@ -2246,7 +2246,7 @@ pub const Zld = struct { var last_off: u32 = 0; for (addresses.items) |addr| { - const offset = @intCast(u32, addr - text_seg.vmaddr); + const offset = @as(u32, @intCast(addr - text_seg.vmaddr)); const diff = offset - last_off; if (diff == 0) continue; @@ -2258,7 +2258,7 @@ pub const Zld = struct { var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); - const max_size = @intCast(usize, offsets.items.len * 
@sizeOf(u64)); + const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64))); try buffer.ensureTotalCapacity(max_size); for (offsets.items) |offset| { @@ -2281,8 +2281,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer.items, offset); - self.function_starts_cmd.dataoff = @intCast(u32, offset); - self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned); + self.function_starts_cmd.dataoff = @as(u32, @intCast(offset)); + self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned)); } fn filterDataInCode( @@ -2324,8 +2324,8 @@ pub const Zld = struct { const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym| source_sym.n_value else blk: { - const nbase = @intCast(u32, object.in_symtab.?.len); - const source_sect_id = @intCast(u8, atom.sym_index - nbase); + const nbase = @as(u32, @intCast(object.in_symtab.?.len)); + const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase)); break :blk object.getSourceSection(source_sect_id).addr; }; const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size); @@ -2363,8 +2363,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, offset); - self.data_in_code_cmd.dataoff = @intCast(u32, offset); - self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned); + self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset)); + self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned)); } fn writeSymtabs(self: *Zld) !void { @@ -2428,7 +2428,7 @@ pub const Zld = struct { if (!sym.undf()) continue; // not an import, skip if (sym.n_desc == N_DEAD) continue; - const new_index = @intCast(u32, imports.items.len); + const new_index = @as(u32, @intCast(imports.items.len)); var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global)); try imports.append(out_sym); @@ -2443,9 +2443,9 @@ pub const Zld = struct { } } - const nlocals = @intCast(u32, locals.items.len); - const nexports = @intCast(u32, 
exports.items.len); - const nimports = @intCast(u32, imports.items.len); + const nlocals = @as(u32, @intCast(locals.items.len)); + const nexports = @as(u32, @intCast(exports.items.len)); + const nimports = @as(u32, @intCast(imports.items.len)); const nsyms = nlocals + nexports + nimports; const seg = self.getLinkeditSegmentPtr(); @@ -2465,7 +2465,7 @@ pub const Zld = struct { log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); try self.file.pwriteAll(buffer.items, offset); - self.symtab_cmd.symoff = @intCast(u32, offset); + self.symtab_cmd.symoff = @as(u32, @intCast(offset)); self.symtab_cmd.nsyms = nsyms; return SymtabCtx{ @@ -2493,8 +2493,8 @@ pub const Zld = struct { try self.file.pwriteAll(buffer, offset); - self.symtab_cmd.stroff = @intCast(u32, offset); - self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned); + self.symtab_cmd.stroff = @as(u32, @intCast(offset)); + self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned)); } const SymtabCtx = struct { @@ -2506,8 +2506,8 @@ pub const Zld = struct { fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void { const gpa = self.gpa; - const nstubs = @intCast(u32, self.stubs.items.len); - const ngot_entries = @intCast(u32, self.got_entries.items.len); + const nstubs = @as(u32, @intCast(self.stubs.items.len)); + const ngot_entries = @as(u32, @intCast(self.got_entries.items.len)); const nindirectsyms = nstubs * 2 + ngot_entries; const iextdefsym = ctx.nlocalsym; const iundefsym = iextdefsym + ctx.nextdefsym; @@ -2572,7 +2572,7 @@ pub const Zld = struct { self.dysymtab_cmd.nextdefsym = ctx.nextdefsym; self.dysymtab_cmd.iundefsym = iundefsym; self.dysymtab_cmd.nundefsym = ctx.nundefsym; - self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset); + self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset)); self.dysymtab_cmd.nindirectsyms = nindirectsyms; } @@ -2599,8 +2599,8 @@ pub const Zld = struct { // except for code signature data. 
try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1); - self.codesig_cmd.dataoff = @intCast(u32, offset); - self.codesig_cmd.datasize = @intCast(u32, needed_size); + self.codesig_cmd.dataoff = @as(u32, @intCast(offset)); + self.codesig_cmd.datasize = @as(u32, @intCast(needed_size)); } fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void { @@ -2689,7 +2689,7 @@ pub const Zld = struct { fn getSegmentByName(self: Zld, segname: []const u8) ?u8 { for (self.segments.items, 0..) |seg, i| { - if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i); + if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i)); } else return null; } @@ -2714,15 +2714,15 @@ pub const Zld = struct { // TODO investigate caching with a hashmap for (self.sections.items(.header), 0..) |header, i| { if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname)) - return @intCast(u8, i); + return @as(u8, @intCast(i)); } else return null; } pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } { var start: u8 = 0; const nsects = for (self.segments.items, 0..) 
|seg, i| { - if (i == segment_index) break @intCast(u8, seg.nsects); - start += @intCast(u8, seg.nsects); + if (i == segment_index) break @as(u8, @intCast(seg.nsects)); + start += @as(u8, @intCast(seg.nsects)); } else 0; return .{ .start = start, .end = start + nsects }; } @@ -2879,7 +2879,7 @@ pub const Zld = struct { var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: { var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa); errdefer name_lookup.deinit(); - try name_lookup.ensureUnusedCapacity(@intCast(u32, object.atoms.items.len)); + try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len))); try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup); break :blk name_lookup; } else null; @@ -3069,7 +3069,7 @@ pub const Zld = struct { @memset(&buf, '_'); scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{ sym_id, - object.getSymbolName(@intCast(u32, sym_id)), + object.getSymbolName(@as(u32, @intCast(sym_id))), sym.n_value, sym.n_sect, logSymAttributes(sym, &buf), @@ -3252,7 +3252,7 @@ pub const Zld = struct { } }; -pub const N_DEAD: u16 = @bitCast(u16, @as(i16, -1)); +pub const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1))); const Section = struct { header: macho.section_64, @@ -3791,7 +3791,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr } for (zld.objects.items, 0..) 
|*object, object_id| { - try object.splitIntoAtoms(&zld, @intCast(u32, object_id)); + try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id))); } if (gc_sections) { @@ -3929,7 +3929,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr } else sym.n_value; try lc_writer.writeStruct(macho.entry_point_command{ - .entryoff = @intCast(u32, addr - seg.vmaddr), + .entryoff = @as(u32, @intCast(addr - seg.vmaddr)), .stacksize = options.stack_size_override orelse 0, }); } else { @@ -3943,7 +3943,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr }); try load_commands.writeBuildVersionLC(zld.options, lc_writer); - const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len); + const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len)); try lc_writer.writeStruct(zld.uuid_cmd); try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer); @@ -3954,7 +3954,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr const ncmds = load_commands.calcNumOfLCs(lc_buffer.items); try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64)); - try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len)); + try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len))); try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig); if (codesig) |*csig| { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index b36e16452ec1..ad5292aa8859 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -295,7 +295,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { .sym_index = blk: { try self.syms.append(gpa, undefined); try self.syms.append(gpa, undefined); - break :blk @intCast(u32, self.syms.items.len - 1); + break :blk @as(u32, @intCast(self.syms.items.len - 1)); }, }; try fn_map_res.value_ptr.functions.put(gpa, decl_index, out); @@ -485,7 
+485,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ - .parent_atom_index = @intCast(Atom.Index, atom_idx), + .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { .ok => code_buffer.items, @@ -562,10 +562,10 @@ pub fn flush(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) li pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void { if (delta_line > 0 and delta_line < 65) { - const toappend = @intCast(u8, delta_line); + const toappend = @as(u8, @intCast(delta_line)); try l.append(toappend); } else if (delta_line < 0 and delta_line > -65) { - const toadd: u8 = @intCast(u8, -delta_line + 64); + const toadd: u8 = @as(u8, @intCast(-delta_line + 64)); try l.append(toadd); } else if (delta_line != 0) { try l.append(0); @@ -675,7 +675,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const out = entry.value_ptr.*; { // connect the previous decl to the next - const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount); + const delta_line = @as(i32, @intCast(out.start_line)) - @as(i32, @intCast(linecount)); try changeLine(&linecountinfo, delta_line); // TODO change the pc too (maybe?) @@ -692,7 +692,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No atom.offset = off; log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -721,7 +721,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No text_i += code.len; text_atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -749,7 +749,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -772,7 +772,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -792,7 +792,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No data_i += code.len; data_atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[data_atom.got_index.? 
* 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[data_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } @@ -815,13 +815,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No // generate the header self.hdr = .{ .magic = self.magic, - .text = @intCast(u32, text_i), - .data = @intCast(u32, data_i), - .syms = @intCast(u32, syms.len), + .text = @as(u32, @intCast(text_i)), + .data = @as(u32, @intCast(data_i)), + .syms = @as(u32, @intCast(syms.len)), .bss = 0, .spsz = 0, - .pcsz = @intCast(u32, linecountinfo.items.len), - .entry = @intCast(u32, self.entry_val.?), + .pcsz = @as(u32, @intCast(linecountinfo.items.len)), + .entry = @as(u32, @intCast(self.entry_val.?)), }; @memcpy(hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points @@ -847,13 +847,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const code = source_atom.code.getCode(self); if (reloc.pcrel) { - const disp = @intCast(i32, target_offset) - @intCast(i32, source_atom.offset.?) 
- 4 - @intCast(i32, offset); - mem.writeInt(i32, code[@intCast(usize, offset)..][0..4], @intCast(i32, disp), self.base.options.target.cpu.arch.endian()); + const disp = @as(i32, @intCast(target_offset)) - @as(i32, @intCast(source_atom.offset.?)) - 4 - @as(i32, @intCast(offset)); + mem.writeInt(i32, code[@as(usize, @intCast(offset))..][0..4], @as(i32, @intCast(disp)), self.base.options.target.cpu.arch.endian()); } else { if (!self.sixtyfour_bit) { - mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_offset + addend), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, code[@as(usize, @intCast(offset))..][0..4], @as(u32, @intCast(target_offset + addend)), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian()); + mem.writeInt(u64, code[@as(usize, @intCast(offset))..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian()); } } log.debug("relocating the address of '{s}' + {d} into '{s}' + {d} (({s}[{d}] = 0x{x} + 0x{x})", .{ target_symbol.name, addend, source_atom_symbol.name, offset, source_atom_symbol.name, offset, target_offset, addend }); @@ -960,7 +960,7 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void { fn createAtom(self: *Plan9) !Atom.Index { const gpa = self.base.allocator; - const index = @intCast(Atom.Index, self.atoms.items.len); + const index = @as(Atom.Index, @intCast(self.atoms.items.len)); const atom = try self.atoms.addOne(gpa); atom.* = .{ .type = .t, @@ -1060,7 +1060,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = @intCast(Atom.Index, atom_index) }, + .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_index)) }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1188,7 +1188,7 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: 
aout.Sym) !void { // log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value }); if (sym.type == .bad) return; // we don't want to write free'd symbols if (!self.sixtyfour_bit) { - try w.writeIntBig(u32, @intCast(u32, sym.value)); + try w.writeIntBig(u32, @as(u32, @intCast(sym.value))); } else { try w.writeIntBig(u64, sym.value); } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 647450a603d4..97a05a6e4ac1 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -317,7 +317,7 @@ pub const StringTable = struct { } try table.string_data.ensureUnusedCapacity(allocator, string.len + 1); - const offset = @intCast(u32, table.string_data.items.len); + const offset = @as(u32, @intCast(table.string_data.items.len)); log.debug("writing new string '{s}' at offset 0x{x}", .{ string, offset }); @@ -333,7 +333,7 @@ pub const StringTable = struct { /// Asserts offset does not exceed bounds. pub fn get(table: StringTable, off: u32) []const u8 { assert(off < table.string_data.items.len); - return mem.sliceTo(@ptrCast([*:0]const u8, table.string_data.items.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(table.string_data.items.ptr + off)), 0); } /// Returns the offset of a given string when it exists. 
@@ -396,7 +396,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option // For object files we will import the stack pointer symbol if (options.output_mode == .Obj) { symbol.setUndefined(true); - symbol.index = @intCast(u32, wasm_bin.imported_globals_count); + symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count)); wasm_bin.imported_globals_count += 1; try wasm_bin.imports.putNoClobber( allocator, @@ -408,7 +408,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option }, ); } else { - symbol.index = @intCast(u32, wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len); + symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len)); symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); const global = try wasm_bin.wasm_globals.addOne(allocator); global.* = .{ @@ -431,7 +431,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option }; if (options.output_mode == .Obj or options.import_table) { symbol.setUndefined(true); - symbol.index = @intCast(u32, wasm_bin.imported_tables_count); + symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count)); wasm_bin.imported_tables_count += 1; try wasm_bin.imports.put(allocator, loc, .{ .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name), @@ -439,7 +439,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option .kind = .{ .table = table }, }); } else { - symbol.index = @intCast(u32, wasm_bin.imported_tables_count + wasm_bin.tables.items.len); + symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count + wasm_bin.tables.items.len)); try wasm_bin.tables.append(allocator, table); if (options.export_table) { symbol.setFlag(.WASM_SYM_EXPORTED); @@ -519,7 +519,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol } fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc { - 
const sym_index = @intCast(u32, wasm.symbols.items.len); + const sym_index = @as(u32, @intCast(wasm.symbols.items.len)); const loc: SymbolLoc = .{ .index = sym_index, .file = null }; try wasm.symbols.append(wasm.base.allocator, .{ .name = name_offset, @@ -588,7 +588,7 @@ pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom. /// Creates a new empty `Atom` and returns its `Atom.Index` fn createAtom(wasm: *Wasm) !Atom.Index { - const index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = Atom.empty; atom.sym_index = try wasm.allocateSymbol(); @@ -669,7 +669,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { log.debug("Resolving symbols in object: '{s}'", .{object.name}); for (object.symtable, 0..) |symbol, i| { - const sym_index = @intCast(u32, i); + const sym_index = @as(u32, @intCast(i)); const location: SymbolLoc = .{ .file = object_index, .index = sym_index, @@ -830,7 +830,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void { // Symbol is found in unparsed object file within current archive. // Parse object and and resolve symbols again before we check remaining // undefined symbols. 
- const object_file_index = @intCast(u16, wasm.objects.items.len); + const object_file_index = @as(u16, @intCast(wasm.objects.items.len)); var object = try archive.parseObject(wasm.base.allocator, offset.items[0]); try wasm.objects.append(wasm.base.allocator, object); try wasm.resolveSymbolsInObject(object_file_index); @@ -1046,7 +1046,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void { try writer.writeByte(std.wasm.opcode(.i32_add)); try writer.writeByte(std.wasm.opcode(.global_set)); - try leb.writeULEB128(writer, wasm.imported_globals_count + @intCast(u32, wasm.wasm_globals.items.len + got_index)); + try leb.writeULEB128(writer, wasm.imported_globals_count + @as(u32, @intCast(wasm.wasm_globals.items.len + got_index))); } try writer.writeByte(std.wasm.opcode(.end)); @@ -1091,7 +1091,7 @@ fn validateFeatures( // linked object file so we can test them. for (wasm.objects.items, 0..) |object, object_index| { for (object.features) |feature| { - const value = @intCast(u16, object_index) << 1 | @as(u1, 1); + const value = @as(u16, @intCast(object_index)) << 1 | @as(u1, 1); switch (feature.prefix) { .used => { used[@intFromEnum(feature.tag)] = value; @@ -1117,12 +1117,12 @@ fn validateFeatures( // and insert it into the 'allowed' set. When features are not inferred, // we validate that a used feature is allowed. for (used, 0..) 
|used_set, used_index| { - const is_enabled = @truncate(u1, used_set) != 0; + const is_enabled = @as(u1, @truncate(used_set)) != 0; if (infer) { allowed[used_index] = is_enabled; emit_features_count.* += @intFromBool(is_enabled); } else if (is_enabled and !allowed[used_index]) { - log.err("feature '{}' not allowed, but used by linked object", .{@enumFromInt(types.Feature.Tag, used_index)}); + log.err("feature '{}' not allowed, but used by linked object", .{@as(types.Feature.Tag, @enumFromInt(used_index))}); log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name}); valid_feature_set = false; } @@ -1134,7 +1134,7 @@ fn validateFeatures( if (wasm.base.options.shared_memory) { const disallowed_feature = disallowed[@intFromEnum(types.Feature.Tag.shared_mem)]; - if (@truncate(u1, disallowed_feature) != 0) { + if (@as(u1, @truncate(disallowed_feature)) != 0) { log.err( "shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled", .{wasm.objects.items[disallowed_feature >> 1].name}, @@ -1163,7 +1163,7 @@ fn validateFeatures( if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set. // from here a feature is always used const disallowed_feature = disallowed[@intFromEnum(feature.tag)]; - if (@truncate(u1, disallowed_feature) != 0) { + if (@as(u1, @truncate(disallowed_feature)) != 0) { log.err("feature '{}' is disallowed, but used by linked object", .{feature.tag}); log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name}); log.err(" used in '{s}'", .{object.name}); @@ -1175,9 +1175,9 @@ fn validateFeatures( // validate the linked object file has each required feature for (required, 0..) 
|required_feature, feature_index| { - const is_required = @truncate(u1, required_feature) != 0; + const is_required = @as(u1, @truncate(required_feature)) != 0; if (is_required and !object_used_features[feature_index]) { - log.err("feature '{}' is required but not used in linked object", .{@enumFromInt(types.Feature.Tag, feature_index)}); + log.err("feature '{}' is required but not used in linked object", .{@as(types.Feature.Tag, @enumFromInt(feature_index))}); log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name}); log.err(" missing in '{s}'", .{object.name}); valid_feature_set = false; @@ -1333,7 +1333,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { wasm.symbols.items[index] = symbol; return index; } - const index = @intCast(u32, wasm.symbols.items.len); + const index = @as(u32, @intCast(wasm.symbols.items.len)); wasm.symbols.appendAssumeCapacity(symbol); return index; } @@ -1485,7 +1485,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); if (code.len == 0) return; atom.alignment = decl.getAlignment(mod); } @@ -1589,7 +1589,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In }; const atom = wasm.getAtomPtr(atom_index); - atom.size = @intCast(u32, code.len); + atom.size = @as(u32, @intCast(code.len)); try atom.code.appendSlice(wasm.base.allocator, code); return atom.sym_index; } @@ -1617,7 +1617,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3 symbol.setUndefined(true); const sym_index = if (wasm.symbols_free_list.popOrNull()) |index| index else blk: { - var index = @intCast(u32, wasm.symbols.items.len); + var index = @as(u32, @intCast(wasm.symbols.items.len)); try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1); 
wasm.symbols.items.len += 1; break :blk index; @@ -1654,15 +1654,15 @@ pub fn getDeclVAddr( try wasm.addTableFunction(target_symbol_index); try atom.relocs.append(wasm.base.allocator, .{ .index = target_symbol_index, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64, }); } else { try atom.relocs.append(wasm.base.allocator, .{ .index = target_symbol_index, - .offset = @intCast(u32, reloc_info.offset), + .offset = @as(u32, @intCast(reloc_info.offset)), .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64, - .addend = @intCast(i32, reloc_info.addend), + .addend = @as(i32, @intCast(reloc_info.addend)), }); } // we do not know the final address at this point, @@ -1840,7 +1840,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { /// Appends a new entry to the indirect function table pub fn addTableFunction(wasm: *Wasm, symbol_index: u32) !void { - const index = @intCast(u32, wasm.function_table.count()); + const index = @as(u32, @intCast(wasm.function_table.count())); try wasm.function_table.put(wasm.base.allocator, .{ .file = null, .index = symbol_index }, index); } @@ -1971,7 +1971,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm); const final_index: u32 = switch (kind) { .function => result: { - const index = @intCast(u32, wasm.functions.count() + wasm.imported_functions_count); + const index = @as(u32, @intCast(wasm.functions.count() + wasm.imported_functions_count)); const type_index = wasm.atom_types.get(atom_index).?; try wasm.functions.putNoClobber( wasm.base.allocator, @@ -1982,7 +1982,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { symbol.index = index; if (wasm.code_section_index == null) { - wasm.code_section_index = @intCast(u32, 
wasm.segments.items.len); + wasm.code_section_index = @as(u32, @intCast(wasm.segments.items.len)); try wasm.segments.append(wasm.base.allocator, .{ .alignment = atom.alignment, .size = atom.size, @@ -2020,12 +2020,12 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { const index = gop.value_ptr.*; wasm.segments.items[index].size += atom.size; - symbol.index = @intCast(u32, wasm.segment_info.getIndex(index).?); + symbol.index = @as(u32, @intCast(wasm.segment_info.getIndex(index).?)); // segment info already exists, so free its memory wasm.base.allocator.free(segment_name); break :result index; } else { - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); var flags: u32 = 0; if (wasm.base.options.shared_memory) { flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE); @@ -2038,7 +2038,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { }); gop.value_ptr.* = index; - const info_index = @intCast(u32, wasm.segment_info.count()); + const info_index = @as(u32, @intCast(wasm.segment_info.count())); try wasm.segment_info.put(wasm.base.allocator, index, segment_info); symbol.index = info_index; break :result index; @@ -2074,13 +2074,13 @@ fn allocateDebugAtoms(wasm: *Wasm) !void { const allocAtom = struct { fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void { const index = maybe_index.* orelse idx: { - const index = @intCast(u32, bin.segments.items.len); + const index = @as(u32, @intCast(bin.segments.items.len)); try bin.appendDummySegment(); maybe_index.* = index; break :idx index; }; const atom = bin.getAtomPtr(atom_index); - atom.size = @intCast(u32, atom.code.items.len); + atom.size = @as(u32, @intCast(atom.code.items.len)); bin.symbols.items[atom.sym_index].index = index; try bin.appendAtomAtIndex(index, atom_index); } @@ -2215,7 +2215,7 @@ fn setupInitFunctions(wasm: *Wasm) !void { log.debug("appended init func '{s}'\n", 
.{object.string_table.get(symbol.name)}); wasm.init_funcs.appendAssumeCapacity(.{ .index = init_func.symbol_index, - .file = @intCast(u16, file_index), + .file = @as(u16, @intCast(file_index)), .priority = init_func.priority, }); } @@ -2248,7 +2248,7 @@ fn setupErrorsLen(wasm: *Wasm) !void { atom.deinit(wasm); break :blk index; } else new_atom: { - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); try wasm.symbol_atom.put(wasm.base.allocator, loc, atom_index); try wasm.managed_atoms.append(wasm.base.allocator, undefined); break :new_atom atom_index; @@ -2257,7 +2257,7 @@ fn setupErrorsLen(wasm: *Wasm) !void { atom.* = Atom.empty; atom.sym_index = loc.index; atom.size = 2; - try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @intCast(u16, errors_len)); + try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @as(u16, @intCast(errors_len))); try wasm.parseAtom(atom_index, .{ .data = .read_only }); } @@ -2325,7 +2325,7 @@ fn createSyntheticFunction( const symbol = loc.getSymbol(wasm); const ty_index = try wasm.putOrGetFuncType(func_ty); // create function with above type - const func_index = wasm.imported_functions_count + @intCast(u32, wasm.functions.count()); + const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count())); try wasm.functions.putNoClobber( wasm.base.allocator, .{ .file = null, .index = func_index }, @@ -2334,10 +2334,10 @@ fn createSyntheticFunction( symbol.index = func_index; // create the atom that will be output into the final binary - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = .{ - .size = @intCast(u32, function_body.items.len), + .size = @as(u32, @intCast(function_body.items.len)), .offset = 0, .sym_index = loc.index, 
.file = null, @@ -2369,10 +2369,10 @@ pub fn createFunction( ) !u32 { const loc = try wasm.createSyntheticSymbol(symbol_name, .function); - const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len)); const atom = try wasm.managed_atoms.addOne(wasm.base.allocator); atom.* = .{ - .size = @intCast(u32, function_body.items.len), + .size = @as(u32, @intCast(function_body.items.len)), .offset = 0, .sym_index = loc.index, .file = null, @@ -2386,7 +2386,7 @@ pub fn createFunction( symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); // ensure function does not get exported const section_index = wasm.code_section_index orelse idx: { - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); try wasm.appendDummySegment(); break :idx index; }; @@ -2438,7 +2438,7 @@ fn initializeTLSFunction(wasm: *Wasm) !void { try writer.writeByte(std.wasm.opcode(.misc_prefix)); try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init)); // segment immediate - try leb.writeULEB128(writer, @intCast(u32, data_index)); + try leb.writeULEB128(writer, @as(u32, @intCast(data_index))); // memory index immediate (always 0) try leb.writeULEB128(writer, @as(u32, 0)); } @@ -2567,16 +2567,16 @@ fn mergeSections(wasm: *Wasm) !void { if (!gop.found_existing) { gop.value_ptr.* = object.functions[index]; } - symbol.index = @intCast(u32, gop.index) + wasm.imported_functions_count; + symbol.index = @as(u32, @intCast(gop.index)) + wasm.imported_functions_count; }, .global => { const original_global = object.globals[index]; - symbol.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, original_global); }, .table => { const original_table = object.tables[index]; - symbol.index = @intCast(u32, 
wasm.tables.items.len) + wasm.imported_tables_count; + symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + wasm.imported_tables_count; try wasm.tables.append(wasm.base.allocator, original_table); }, else => unreachable, @@ -2596,7 +2596,7 @@ fn mergeTypes(wasm: *Wasm) !void { // type inserted. If we do this for the same function multiple times, // it will be overwritten with the incorrect type. var dirty = std.AutoHashMap(u32, void).init(wasm.base.allocator); - try dirty.ensureUnusedCapacity(@intCast(u32, wasm.functions.count())); + try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count()))); defer dirty.deinit(); for (wasm.resolved_symbols.keys()) |sym_loc| { @@ -2660,10 +2660,10 @@ fn setupExports(wasm: *Wasm) !void { break :blk try wasm.string_table.put(wasm.base.allocator, sym_name); }; const exp: types.Export = if (symbol.tag == .data) exp: { - const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len); + const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len)); try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, symbol.virtual_address) }, + .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) }, }); break :exp .{ .name = export_name, @@ -2734,10 +2734,10 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; // We always put the stack pointer global at index 0 - wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); + wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); } - var offset: u32 = @intCast(u32, memory_ptr); + var offset: u32 = @as(u32, @intCast(memory_ptr)); var data_seg_it = wasm.data_segments.iterator(); while (data_seg_it.next()) |entry| { const segment = 
&wasm.segments.items[entry.value_ptr.*]; @@ -2747,26 +2747,26 @@ fn setupMemory(wasm: *Wasm) !void { if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { if (wasm.findGlobalSymbol("__tls_size")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, segment.size) }, + .init = .{ .i32_const = @as(i32, @intCast(segment.size)) }, }); } if (wasm.findGlobalSymbol("__tls_align")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = false }, - .init = .{ .i32_const = @intCast(i32, segment.alignment) }, + .init = .{ .i32_const = @as(i32, @intCast(segment.alignment)) }, }); } if (wasm.findGlobalSymbol("__tls_base")) |loc| { const sym = loc.getSymbol(wasm); - sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count; + sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count; try wasm.wasm_globals.append(wasm.base.allocator, .{ .global_type = .{ .valtype = .i32, .mutable = wasm.base.options.shared_memory }, - .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @intCast(i32, memory_ptr) }, + .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @as(i32, @intCast(memory_ptr)) }, }); } } @@ -2782,21 +2782,21 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = mem.alignForward(u64, memory_ptr, 4); const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data); const sym = loc.getSymbol(wasm); 
- sym.virtual_address = @intCast(u32, memory_ptr); + sym.virtual_address = @as(u32, @intCast(memory_ptr)); memory_ptr += 4; } if (!place_stack_first and !is_obj) { memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; - wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); + wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr)))); } // One of the linked object files has a reference to the __heap_base symbol. // We must set its virtual address so it can be used in relocations. if (wasm.findGlobalSymbol("__heap_base")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment)); + symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment))); } // Setup the max amount of pages @@ -2821,12 +2821,12 @@ fn setupMemory(wasm: *Wasm) !void { memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); // In case we do not import memory, but define it ourselves, // set the minimum amount of pages on the memory section. 
- wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size); + wasm.memories.limits.min = @as(u32, @intCast(memory_ptr / page_size)); log.debug("Total memory pages: {d}", .{wasm.memories.limits.min}); if (wasm.findGlobalSymbol("__heap_end")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, memory_ptr); + symbol.virtual_address = @as(u32, @intCast(memory_ptr)); } if (wasm.base.options.max_memory) |max_memory| { @@ -2842,7 +2842,7 @@ fn setupMemory(wasm: *Wasm) !void { log.err("Maximum memory exceeds maxmium amount {d}", .{max_memory_allowed}); return error.MemoryTooBig; } - wasm.memories.limits.max = @intCast(u32, max_memory / page_size); + wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size)); wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX); if (wasm.base.options.shared_memory) { wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED); @@ -2857,7 +2857,7 @@ fn setupMemory(wasm: *Wasm) !void { pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32) !?u32 { const object: Object = wasm.objects.items[object_index]; const relocatable_data = object.relocatable_data[relocatable_index]; - const index = @intCast(u32, wasm.segments.items.len); + const index = @as(u32, @intCast(wasm.segments.items.len)); switch (relocatable_data.type) { .data => { @@ -3023,10 +3023,10 @@ fn populateErrorNameTable(wasm: *Wasm) !void { const mod = wasm.base.options.module.?; for (mod.global_error_set.keys()) |error_name_nts| { const error_name = mod.intern_pool.stringToSlice(error_name_nts); - const len = @intCast(u32, error_name.len + 1); // names are 0-termianted + const len = @as(u32, @intCast(error_name.len + 1)); // names are 0-termianted const slice_ty = Type.slice_const_u8_sentinel_0; - const offset = @intCast(u32, atom.code.items.len); + const offset = @as(u32, @intCast(atom.code.items.len)); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 
0, 4); // ptr to name, will be relocated try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1); @@ -3035,9 +3035,9 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .index = names_atom.sym_index, .relocation_type = .R_WASM_MEMORY_ADDR_I32, .offset = offset, - .addend = @intCast(i32, addend), + .addend = @as(i32, @intCast(addend)), }); - atom.size += @intCast(u32, slice_ty.abiSize(mod)); + atom.size += @as(u32, @intCast(slice_ty.abiSize(mod))); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3063,7 +3063,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { /// This initializes the index, appends a new segment, /// and finally, creates a managed `Atom`. pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index { - const new_index = @intCast(u32, wasm.segments.items.len); + const new_index = @as(u32, @intCast(wasm.segments.items.len)); index.* = new_index; try wasm.appendDummySegment(); @@ -3294,7 +3294,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l try wasm.parseInputFiles(positionals.items); for (wasm.objects.items, 0..) |_, object_index| { - try wasm.resolveSymbolsInObject(@intCast(u16, object_index)); + try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index))); } var emit_features_count: u32 = 0; @@ -3309,7 +3309,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l try wasm.setupImports(); for (wasm.objects.items, 0..) |*object, object_index| { - try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm); + try object.parseIntoAtoms(gpa, @as(u16, @intCast(object_index)), wasm); } try wasm.allocateAtoms(); @@ -3382,7 +3382,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod try wasm.parseInputFiles(positionals.items); for (wasm.objects.items, 0..) 
|_, object_index| { - try wasm.resolveSymbolsInObject(@intCast(u16, object_index)); + try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index))); } var emit_features_count: u32 = 0; @@ -3446,7 +3446,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod } for (wasm.objects.items, 0..) |*object, object_index| { - try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm); + try object.parseIntoAtoms(wasm.base.allocator, @as(u16, @intCast(object_index)), wasm); } try wasm.allocateAtoms(); @@ -3497,11 +3497,11 @@ fn writeToFile( log.debug("Writing type section. Count: ({d})", .{wasm.func_types.items.len}); for (wasm.func_types.items) |func_type| { try leb.writeULEB128(binary_writer, std.wasm.function_type); - try leb.writeULEB128(binary_writer, @intCast(u32, func_type.params.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.params.len))); for (func_type.params) |param_ty| { try leb.writeULEB128(binary_writer, std.wasm.valtype(param_ty)); } - try leb.writeULEB128(binary_writer, @intCast(u32, func_type.returns.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.returns.len))); for (func_type.returns) |ret_ty| { try leb.writeULEB128(binary_writer, std.wasm.valtype(ret_ty)); } @@ -3511,8 +3511,8 @@ fn writeToFile( binary_bytes.items, header_offset, .type, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.func_types.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.func_types.items.len)), ); section_count += 1; } @@ -3543,8 +3543,8 @@ fn writeToFile( binary_bytes.items, header_offset, .import, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.imports.count() + @intFromBool(import_memory)), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.imports.count() + 
@intFromBool(import_memory))), ); section_count += 1; } @@ -3560,8 +3560,8 @@ fn writeToFile( binary_bytes.items, header_offset, .function, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.functions.count()), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.functions.count())), ); section_count += 1; } @@ -3579,8 +3579,8 @@ fn writeToFile( binary_bytes.items, header_offset, .table, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.tables.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.tables.items.len)), ); section_count += 1; } @@ -3594,7 +3594,7 @@ fn writeToFile( binary_bytes.items, header_offset, .memory, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), @as(u32, 1), // wasm currently only supports 1 linear memory segment ); section_count += 1; @@ -3614,8 +3614,8 @@ fn writeToFile( binary_bytes.items, header_offset, .global, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.wasm_globals.items.len), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.wasm_globals.items.len)), ); section_count += 1; } @@ -3626,14 +3626,14 @@ fn writeToFile( for (wasm.exports.items) |exp| { const name = wasm.string_table.get(exp.name); - try leb.writeULEB128(binary_writer, @intCast(u32, name.len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(name.len))); try binary_writer.writeAll(name); try leb.writeULEB128(binary_writer, @intFromEnum(exp.kind)); try leb.writeULEB128(binary_writer, exp.index); } if (!import_memory) { - try leb.writeULEB128(binary_writer, @intCast(u32, "memory".len)); + try leb.writeULEB128(binary_writer, @as(u32, @intCast("memory".len))); try 
binary_writer.writeAll("memory"); try binary_writer.writeByte(std.wasm.externalKind(.memory)); try leb.writeULEB128(binary_writer, @as(u32, 0)); @@ -3643,8 +3643,8 @@ fn writeToFile( binary_bytes.items, header_offset, .@"export", - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, wasm.exports.items.len) + @intFromBool(!import_memory), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(wasm.exports.items.len)) + @intFromBool(!import_memory), ); section_count += 1; } @@ -3665,7 +3665,7 @@ fn writeToFile( if (flags == 0x02) { try leb.writeULEB128(binary_writer, @as(u8, 0)); // represents funcref } - try leb.writeULEB128(binary_writer, @intCast(u32, wasm.function_table.count())); + try leb.writeULEB128(binary_writer, @as(u32, @intCast(wasm.function_table.count()))); var symbol_it = wasm.function_table.keyIterator(); while (symbol_it.next()) |symbol_loc_ptr| { try leb.writeULEB128(binary_writer, symbol_loc_ptr.*.getSymbol(wasm).index); @@ -3675,7 +3675,7 @@ fn writeToFile( binary_bytes.items, header_offset, .element, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), @as(u32, 1), ); section_count += 1; @@ -3689,8 +3689,8 @@ fn writeToFile( binary_bytes.items, header_offset, .data_count, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, data_segments_count), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(data_segments_count)), ); } @@ -3731,13 +3731,13 @@ fn writeToFile( try binary_writer.writeAll(sorted_atom.code.items); } - code_section_size = @intCast(u32, binary_bytes.items.len - header_offset - header_size); + code_section_size = @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)); try writeVecSectionHeader( binary_bytes.items, header_offset, .code, code_section_size, - @intCast(u32, 
wasm.functions.count()), + @as(u32, @intCast(wasm.functions.count())), ); code_section_index = section_count; section_count += 1; @@ -3765,7 +3765,7 @@ fn writeToFile( } // when a segment is passive, it's initialized during runtime. if (!segment.isPassive()) { - try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) }); + try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(segment.offset)) }); } // offset into data section try leb.writeULEB128(binary_writer, segment.size); @@ -3808,8 +3808,8 @@ fn writeToFile( binary_bytes.items, header_offset, .data, - @intCast(u32, binary_bytes.items.len - header_offset - header_size), - @intCast(u32, segment_count), + @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)), + @as(u32, @intCast(segment_count)), ); data_section_index = section_count; section_count += 1; @@ -3927,7 +3927,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: [] if (data.len == 0) return; const header_offset = try reserveCustomSectionHeader(binary_bytes); const writer = binary_bytes.writer(); - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); const start = binary_bytes.items.len - header_offset; @@ -3937,7 +3937,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: [] try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -3946,7 +3946,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { const writer = binary_bytes.writer(); const producers = "producers"; - try leb.writeULEB128(writer, @intCast(u32, producers.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(producers.len))); try writer.writeAll(producers); try leb.writeULEB128(writer, @as(u32, 2)); // 2 fields: Language + processed-by @@ 
-3958,7 +3958,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { // language field { const language = "language"; - try leb.writeULEB128(writer, @intCast(u32, language.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(language.len))); try writer.writeAll(language); // field_value_count (TODO: Parse object files for producer sections to detect their language) @@ -3969,7 +3969,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig" try writer.writeAll("Zig"); - try leb.writeULEB128(writer, @intCast(u32, version.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(version.len))); try writer.writeAll(version); } } @@ -3977,7 +3977,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { // processed-by field { const processed_by = "processed-by"; - try leb.writeULEB128(writer, @intCast(u32, processed_by.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(processed_by.len))); try writer.writeAll(processed_by); // field_value_count (TODO: Parse object files for producer sections to detect other used tools) @@ -3988,7 +3988,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig" try writer.writeAll("Zig"); - try leb.writeULEB128(writer, @intCast(u32, version.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(version.len))); try writer.writeAll(version); } } @@ -3996,7 +3996,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void { try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4005,17 +4005,17 @@ fn emitBuildIdSection(binary_bytes: *std.ArrayList(u8), build_id: []const u8) !v const writer = binary_bytes.writer(); const hdr_build_id = "build_id"; - try leb.writeULEB128(writer, @intCast(u32, hdr_build_id.len)); + try 
leb.writeULEB128(writer, @as(u32, @intCast(hdr_build_id.len))); try writer.writeAll(hdr_build_id); try leb.writeULEB128(writer, @as(u32, 1)); - try leb.writeULEB128(writer, @intCast(u32, build_id.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(build_id.len))); try writer.writeAll(build_id); try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4024,17 +4024,17 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con const writer = binary_bytes.writer(); const target_features = "target_features"; - try leb.writeULEB128(writer, @intCast(u32, target_features.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(target_features.len))); try writer.writeAll(target_features); try leb.writeULEB128(writer, features_count); for (enabled_features, 0..) |enabled, feature_index| { if (enabled) { - const feature: types.Feature = .{ .prefix = .used, .tag = @enumFromInt(types.Feature.Tag, feature_index) }; + const feature: types.Feature = .{ .prefix = .used, .tag = @as(types.Feature.Tag, @enumFromInt(feature_index)) }; try leb.writeULEB128(writer, @intFromEnum(feature.prefix)); var buf: [100]u8 = undefined; const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag}); - try leb.writeULEB128(writer, @intCast(u32, string.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(string.len))); try writer.writeAll(string); } } @@ -4042,7 +4042,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4092,7 +4092,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem const header_offset = try reserveCustomSectionHeader(binary_bytes); const writer = 
binary_bytes.writer(); - try leb.writeULEB128(writer, @intCast(u32, "name".len)); + try leb.writeULEB128(writer, @as(u32, @intCast("name".len))); try writer.writeAll("name"); try wasm.emitNameSubsection(.function, funcs.values(), writer); @@ -4102,7 +4102,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem try writeCustomSectionHeader( binary_bytes.items, header_offset, - @intCast(u32, binary_bytes.items.len - header_offset - 6), + @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)), ); } @@ -4112,17 +4112,17 @@ fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: a defer section_list.deinit(); const sub_writer = section_list.writer(); - try leb.writeULEB128(sub_writer, @intCast(u32, names.len)); + try leb.writeULEB128(sub_writer, @as(u32, @intCast(names.len))); for (names) |name| { log.debug("Emit symbol '{s}' type({s})", .{ name.name, @tagName(section_id) }); try leb.writeULEB128(sub_writer, name.index); - try leb.writeULEB128(sub_writer, @intCast(u32, name.name.len)); + try leb.writeULEB128(sub_writer, @as(u32, @intCast(name.name.len))); try sub_writer.writeAll(name.name); } // From now, write to the actual writer try leb.writeULEB128(writer, @intFromEnum(section_id)); - try leb.writeULEB128(writer, @intCast(u32, section_list.items.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(section_list.items.len))); try writer.writeAll(section_list.items); } @@ -4146,11 +4146,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { }, .f32_const => |val| { try writer.writeByte(std.wasm.opcode(.f32_const)); - try writer.writeIntLittle(u32, @bitCast(u32, val)); + try writer.writeIntLittle(u32, @as(u32, @bitCast(val))); }, .f64_const => |val| { try writer.writeByte(std.wasm.opcode(.f64_const)); - try writer.writeIntLittle(u64, @bitCast(u64, val)); + try writer.writeIntLittle(u64, @as(u64, @bitCast(val))); }, .global_get => |val| { try 
writer.writeByte(std.wasm.opcode(.global_get)); @@ -4162,11 +4162,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void { fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void { const module_name = wasm.string_table.get(import.module_name); - try leb.writeULEB128(writer, @intCast(u32, module_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(module_name.len))); try writer.writeAll(module_name); const name = wasm.string_table.get(import.name); - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try writer.writeByte(@intFromEnum(import.kind)); @@ -4594,7 +4594,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) ! fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 { // section id + fixed leb contents size + fixed leb vector length const header_size = 1 + 5 + 5; - const offset = @intCast(u32, bytes.items.len); + const offset = @as(u32, @intCast(bytes.items.len)); try bytes.appendSlice(&[_]u8{0} ** header_size); return offset; } @@ -4602,7 +4602,7 @@ fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 { fn reserveCustomSectionHeader(bytes: *std.ArrayList(u8)) !u32 { // unlike regular section, we don't emit the count const header_size = 1 + 5; - const offset = @intCast(u32, bytes.items.len); + const offset = @as(u32, @intCast(bytes.items.len)); try bytes.appendSlice(&[_]u8{0} ** header_size); return offset; } @@ -4638,7 +4638,7 @@ fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: try wasm.emitSymbolTable(binary_bytes, symbol_table); try wasm.emitSegmentInfo(binary_bytes); - const size = @intCast(u32, binary_bytes.items.len - offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - offset - 6)); try writeCustomSectionHeader(binary_bytes.items, offset, size); } @@ -4661,7 +4661,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: 
*std.ArrayList(u8), symbol_table: const sym_name = if (wasm.export_names.get(sym_loc)) |exp_name| wasm.string_table.get(exp_name) else sym_loc.getName(wasm); switch (symbol.tag) { .data => { - try leb.writeULEB128(writer, @intCast(u32, sym_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len))); try writer.writeAll(sym_name); if (symbol.isDefined()) { @@ -4678,7 +4678,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: else => { try leb.writeULEB128(writer, symbol.index); if (symbol.isDefined()) { - try leb.writeULEB128(writer, @intCast(u32, sym_name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len))); try writer.writeAll(sym_name); } }, @@ -4686,7 +4686,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: } var buf: [10]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @intCast(u32, binary_bytes.items.len - table_offset + 5)); + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, @intCast(binary_bytes.items.len - table_offset + 5))); leb.writeUnsignedFixed(5, buf[5..], symbol_count); try binary_bytes.insertSlice(table_offset, &buf); } @@ -4696,28 +4696,28 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void { try leb.writeULEB128(writer, @intFromEnum(types.SubsectionType.WASM_SEGMENT_INFO)); const segment_offset = binary_bytes.items.len; - try leb.writeULEB128(writer, @intCast(u32, wasm.segment_info.count())); + try leb.writeULEB128(writer, @as(u32, @intCast(wasm.segment_info.count()))); for (wasm.segment_info.values()) |segment_info| { log.debug("Emit segment: {s} align({d}) flags({b})", .{ segment_info.name, @ctz(segment_info.alignment), segment_info.flags, }); - try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len))); try writer.writeAll(segment_info.name); try leb.writeULEB128(writer, @ctz(segment_info.alignment)); try leb.writeULEB128(writer, 
segment_info.flags); } var buf: [5]u8 = undefined; - leb.writeUnsignedFixed(5, &buf, @intCast(u32, binary_bytes.items.len - segment_offset)); + leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset))); try binary_bytes.insertSlice(segment_offset, &buf); } pub fn getULEB128Size(uint_value: anytype) u32 { const T = @TypeOf(uint_value); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; - var value = @intCast(U, uint_value); + var value = @as(U, @intCast(uint_value)); var size: u32 = 0; while (value != 0) : (size += 1) { @@ -4739,7 +4739,7 @@ fn emitCodeRelocations( // write custom section information const name = "reloc.CODE"; - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try leb.writeULEB128(writer, section_index); const reloc_start = binary_bytes.items.len; @@ -4769,7 +4769,7 @@ fn emitCodeRelocations( var buf: [5]u8 = undefined; leb.writeUnsignedFixed(5, &buf, count); try binary_bytes.insertSlice(reloc_start, &buf); - const size = @intCast(u32, binary_bytes.items.len - header_offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)); try writeCustomSectionHeader(binary_bytes.items, header_offset, size); } @@ -4785,7 +4785,7 @@ fn emitDataRelocations( // write custom section information const name = "reloc.DATA"; - try leb.writeULEB128(writer, @intCast(u32, name.len)); + try leb.writeULEB128(writer, @as(u32, @intCast(name.len))); try writer.writeAll(name); try leb.writeULEB128(writer, section_index); const reloc_start = binary_bytes.items.len; @@ -4821,7 +4821,7 @@ fn emitDataRelocations( var buf: [5]u8 = undefined; leb.writeUnsignedFixed(5, &buf, count); try binary_bytes.insertSlice(reloc_start, &buf); - const size = @intCast(u32, binary_bytes.items.len - header_offset - 6); + const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)); try 
writeCustomSectionHeader(binary_bytes.items, header_offset, size); } @@ -4852,7 +4852,7 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 { } // functype does not exist. - const index = @intCast(u32, wasm.func_types.items.len); + const index = @as(u32, @intCast(wasm.func_types.items.len)); const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params); errdefer wasm.base.allocator.free(params); const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns); diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig index f8092c6db14d..64e9ebaaa12d 100644 --- a/src/link/Wasm/Atom.zig +++ b/src/link/Wasm/Atom.zig @@ -114,7 +114,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void { .R_WASM_GLOBAL_INDEX_I32, .R_WASM_MEMORY_ADDR_I32, .R_WASM_SECTION_OFFSET_I32, - => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @intCast(u32, value)), + => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @as(u32, @intCast(value))), .R_WASM_TABLE_INDEX_I64, .R_WASM_MEMORY_ADDR_I64, => std.mem.writeIntLittle(u64, atom.code.items[reloc.offset..][0..8], value), @@ -127,7 +127,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void { .R_WASM_TABLE_NUMBER_LEB, .R_WASM_TYPE_INDEX_LEB, .R_WASM_MEMORY_ADDR_TLS_SLEB, - => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @intCast(u32, value)), + => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @as(u32, @intCast(value))), .R_WASM_MEMORY_ADDR_LEB64, .R_WASM_MEMORY_ADDR_SLEB64, .R_WASM_TABLE_INDEX_SLEB64, @@ -173,24 +173,24 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa if (symbol.isUndefined()) { return 0; } - const va = @intCast(i64, symbol.virtual_address); - return @intCast(u32, va + relocation.addend); + const va = @as(i64, @intCast(symbol.virtual_address)); + return @as(u32, @intCast(va + relocation.addend)); }, .R_WASM_EVENT_INDEX_LEB => return 
symbol.index, .R_WASM_SECTION_OFFSET_I32 => { const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?; const target_atom = wasm_bin.getAtom(target_atom_index); - const rel_value = @intCast(i32, target_atom.offset) + relocation.addend; - return @intCast(u32, rel_value); + const rel_value = @as(i32, @intCast(target_atom.offset)) + relocation.addend; + return @as(u32, @intCast(rel_value)); }, .R_WASM_FUNCTION_OFFSET_I32 => { const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse { - return @bitCast(u32, @as(i32, -1)); + return @as(u32, @bitCast(@as(i32, -1))); }; const target_atom = wasm_bin.getAtom(target_atom_index); const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded) - const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend; - return @intCast(u32, rel_value); + const rel_value = @as(i32, @intCast(target_atom.offset + offset)) + relocation.addend; + return @as(u32, @intCast(rel_value)); }, .R_WASM_MEMORY_ADDR_TLS_SLEB, .R_WASM_MEMORY_ADDR_TLS_SLEB64, diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index db9638193829..8e4df417ae32 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -93,7 +93,7 @@ const RelocatableData = struct { const data_alignment = object.segment_info[relocatable_data.index].alignment; if (data_alignment == 0) return 1; // Decode from power of 2 to natural alignment - return @as(u32, 1) << @intCast(u5, data_alignment); + return @as(u32, 1) << @as(u5, @intCast(data_alignment)); } /// Returns the symbol kind that corresponds to the relocatable section @@ -130,7 +130,7 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz const size = maybe_max_size orelse size: { errdefer gpa.free(object.name); const stat = try file.stat(); - break :size @intCast(usize, stat.size); + break :size @as(usize, @intCast(stat.size)); }; const file_contents = try gpa.alloc(u8, size); @@ 
-365,7 +365,7 @@ fn Parser(comptime ReaderType: type) type { const len = try readLeb(u32, parser.reader.reader()); var limited_reader = std.io.limitedReader(parser.reader.reader(), len); const reader = limited_reader.reader(); - switch (@enumFromInt(std.wasm.Section, byte)) { + switch (@as(std.wasm.Section, @enumFromInt(byte))) { .custom => { const name_len = try readLeb(u32, reader); const name = try gpa.alloc(u8, name_len); @@ -375,13 +375,13 @@ fn Parser(comptime ReaderType: type) type { if (std.mem.eql(u8, name, "linking")) { is_object_file.* = true; parser.object.relocatable_data = relocatable_data.items; // at this point no new relocatable sections will appear so we're free to store them. - try parser.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left)); + try parser.parseMetadata(gpa, @as(usize, @intCast(reader.context.bytes_left))); } else if (std.mem.startsWith(u8, name, "reloc")) { try parser.parseRelocations(gpa); } else if (std.mem.eql(u8, name, "target_features")) { try parser.parseFeatures(gpa); } else if (std.mem.startsWith(u8, name, ".debug")) { - const debug_size = @intCast(u32, reader.context.bytes_left); + const debug_size = @as(u32, @intCast(reader.context.bytes_left)); const debug_content = try gpa.alloc(u8, debug_size); errdefer gpa.free(debug_content); try reader.readNoEof(debug_content); @@ -514,7 +514,7 @@ fn Parser(comptime ReaderType: type) type { const count = try readLeb(u32, reader); while (index < count) : (index += 1) { const code_len = try readLeb(u32, reader); - const offset = @intCast(u32, start - reader.context.bytes_left); + const offset = @as(u32, @intCast(start - reader.context.bytes_left)); const data = try gpa.alloc(u8, code_len); errdefer gpa.free(data); try reader.readNoEof(data); @@ -538,7 +538,7 @@ fn Parser(comptime ReaderType: type) type { _ = flags; // TODO: Do we need to check flags to detect passive/active memory? 
_ = data_offset; const data_len = try readLeb(u32, reader); - const offset = @intCast(u32, start - reader.context.bytes_left); + const offset = @as(u32, @intCast(start - reader.context.bytes_left)); const data = try gpa.alloc(u8, data_len); errdefer gpa.free(data); try reader.readNoEof(data); @@ -645,7 +645,7 @@ fn Parser(comptime ReaderType: type) type { /// such as access to the `import` section to find the name of a symbol. fn parseSubsection(parser: *ObjectParser, gpa: Allocator, reader: anytype) !void { const sub_type = try leb.readULEB128(u8, reader); - log.debug("Found subsection: {s}", .{@tagName(@enumFromInt(types.SubsectionType, sub_type))}); + log.debug("Found subsection: {s}", .{@tagName(@as(types.SubsectionType, @enumFromInt(sub_type)))}); const payload_len = try leb.readULEB128(u32, reader); if (payload_len == 0) return; @@ -655,7 +655,7 @@ fn Parser(comptime ReaderType: type) type { // every subsection contains a 'count' field const count = try leb.readULEB128(u32, limited_reader); - switch (@enumFromInt(types.SubsectionType, sub_type)) { + switch (@as(types.SubsectionType, @enumFromInt(sub_type))) { .WASM_SEGMENT_INFO => { const segments = try gpa.alloc(types.Segment, count); errdefer gpa.free(segments); @@ -714,7 +714,7 @@ fn Parser(comptime ReaderType: type) type { errdefer gpa.free(symbols); for (symbols) |*symbol| { symbol.* = .{ - .kind = @enumFromInt(types.ComdatSym.Type, try leb.readULEB128(u8, reader)), + .kind = @as(types.ComdatSym.Type, @enumFromInt(try leb.readULEB128(u8, reader))), .index = try leb.readULEB128(u32, reader), }; } @@ -758,7 +758,7 @@ fn Parser(comptime ReaderType: type) type { /// requires access to `Object` to find the name of a symbol when it's /// an import and flag `WASM_SYM_EXPLICIT_NAME` is not set. 
fn parseSymbol(parser: *ObjectParser, gpa: Allocator, reader: anytype) !Symbol { - const tag = @enumFromInt(Symbol.Tag, try leb.readULEB128(u8, reader)); + const tag = @as(Symbol.Tag, @enumFromInt(try leb.readULEB128(u8, reader))); const flags = try leb.readULEB128(u32, reader); var symbol: Symbol = .{ .flags = flags, @@ -846,7 +846,7 @@ fn readLeb(comptime T: type, reader: anytype) !T { /// Asserts `T` is an enum fn readEnum(comptime T: type, reader: anytype) !T { switch (@typeInfo(T)) { - .Enum => |enum_type| return @enumFromInt(T, try readLeb(enum_type.tag_type, reader)), + .Enum => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))), else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)), } } @@ -867,7 +867,7 @@ fn readLimits(reader: anytype) !std.wasm.Limits { fn readInit(reader: anytype) !std.wasm.InitExpression { const opcode = try reader.readByte(); - const init_expr: std.wasm.InitExpression = switch (@enumFromInt(std.wasm.Opcode, opcode)) { + const init_expr: std.wasm.InitExpression = switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) { .i32_const => .{ .i32_const = try readLeb(i32, reader) }, .global_get => .{ .global_get = try readLeb(u32, reader) }, else => @panic("TODO: initexpression for other opcodes"), @@ -899,7 +899,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b switch (symbol.tag) { .function, .data, .section => if (!symbol.isUndefined()) { const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index }); - const sym_idx = @intCast(u32, symbol_index); + const sym_idx = @as(u32, @intCast(symbol_index)); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(u32).init(gpa); } @@ -910,11 +910,11 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b } for (object.relocatable_data, 0..) 
|relocatable_data, index| { - const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse { + const final_index = (try wasm_bin.getMatchingSegment(object_index, @as(u32, @intCast(index)))) orelse { continue; // found unknown section, so skip parsing into atom as we do not know how to handle it. }; - const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len); + const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len)); const atom = try wasm_bin.managed_atoms.addOne(gpa); atom.* = Atom.empty; atom.file = object_index; diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig index 9bf54f25c3d1..cce5cdef49d8 100644 --- a/src/link/Wasm/types.zig +++ b/src/link/Wasm/types.zig @@ -205,7 +205,7 @@ pub const Feature = struct { /// From a given cpu feature, returns its linker feature pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag { - return @enumFromInt(Tag, @intFromEnum(feature)); + return @as(Tag, @enumFromInt(@intFromEnum(feature))); } pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { diff --git a/src/link/strtab.zig b/src/link/strtab.zig index abb58defefd2..0d71c9bf839f 100644 --- a/src/link/strtab.zig +++ b/src/link/strtab.zig @@ -45,7 +45,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { const off = entry.key_ptr.*; const save = entry.value_ptr.*; if (!save) continue; - const new_off = @intCast(u32, buffer.items.len); + const new_off = @as(u32, @intCast(buffer.items.len)); buffer.appendSliceAssumeCapacity(self.getAssumeExists(off)); idx_map.putAssumeCapacityNoClobber(off, new_off); } @@ -73,7 +73,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { } try self.buffer.ensureUnusedCapacity(gpa, string.len + 1); - const new_off = @intCast(u32, self.buffer.items.len); + const new_off = @as(u32, @intCast(self.buffer.items.len)); log.debug("writing new string '{s}' at offset 0x{x}", .{ string, 
new_off }); @@ -103,7 +103,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type { pub fn get(self: Self, off: u32) ?[]const u8 { log.debug("getting string at 0x{x}", .{off}); if (off >= self.buffer.items.len) return null; - return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.items.ptr + off), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.items.ptr + off)), 0); } pub fn getAssumeExists(self: Self, off: u32) []const u8 { diff --git a/src/link/table_section.zig b/src/link/table_section.zig index 891f3b1a50c2..2c70b03f4290 100644 --- a/src/link/table_section.zig +++ b/src/link/table_section.zig @@ -18,7 +18,7 @@ pub fn TableSection(comptime Entry: type) type { break :blk index; } else { log.debug(" (allocating entry at index {d})", .{self.entries.items.len}); - const index = @intCast(u32, self.entries.items.len); + const index = @as(u32, @intCast(self.entries.items.len)); _ = self.entries.addOneAssumeCapacity(); break :blk index; } diff --git a/src/link/tapi/Tokenizer.zig b/src/link/tapi/Tokenizer.zig index df46bb7d831d..eb1ffc0e81fc 100644 --- a/src/link/tapi/Tokenizer.zig +++ b/src/link/tapi/Tokenizer.zig @@ -67,11 +67,11 @@ pub const TokenIterator = struct { } pub fn seekBy(self: *TokenIterator, offset: isize) void { - const new_pos = @bitCast(isize, self.pos) + offset; + const new_pos = @as(isize, @bitCast(self.pos)) + offset; if (new_pos < 0) { self.pos = 0; } else { - self.pos = @intCast(usize, new_pos); + self.pos = @as(usize, @intCast(new_pos)); } } }; diff --git a/src/main.zig b/src/main.zig index 22d2d075d1e4..02e1ef6f00d4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3523,7 +3523,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th server.serveMessage(.{ .tag = .progress, - .bytes_len = @intCast(u32, progress_string.len), + .bytes_len = @as(u32, @intCast(progress_string.len)), }, &.{ progress_string, }) catch |err| { @@ -5020,8 +5020,8 @@ pub fn clangMain(alloc: Allocator, args: 
[]const []const u8) error{OutOfMemory}! // Convert the args to the null-terminated format Clang expects. const argv = try argsCopyZ(arena, args); - const exit_code = ZigClang_main(@intCast(c_int, argv.len), argv.ptr); - return @bitCast(u8, @truncate(i8, exit_code)); + const exit_code = ZigClang_main(@as(c_int, @intCast(argv.len)), argv.ptr); + return @as(u8, @bitCast(@as(i8, @truncate(exit_code)))); } pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 { @@ -5035,8 +5035,8 @@ pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory} // Convert the args to the format llvm-ar expects. // We intentionally shave off the zig binary at args[0]. const argv = try argsCopyZ(arena, args[1..]); - const exit_code = ZigLlvmAr_main(@intCast(c_int, argv.len), argv.ptr); - return @bitCast(u8, @truncate(i8, exit_code)); + const exit_code = ZigLlvmAr_main(@as(c_int, @intCast(argv.len)), argv.ptr); + return @as(u8, @bitCast(@as(i8, @truncate(exit_code)))); } /// The first argument determines which backend is invoked. The options are: @@ -5072,7 +5072,7 @@ pub fn lldMain( // "If an error occurs, false will be returned." 
const ok = rc: { const llvm = @import("codegen/llvm/bindings.zig"); - const argc = @intCast(c_int, argv.len); + const argc = @as(c_int, @intCast(argv.len)); if (mem.eql(u8, args[1], "ld.lld")) { break :rc llvm.LinkELF(argc, argv.ptr, can_exit_early, false); } else if (mem.eql(u8, args[1], "lld-link")) { @@ -5507,7 +5507,7 @@ pub fn cmdAstCheck( if (stat.size > max_src_size) return error.FileTooBig; - const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); const amt = try f.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -5703,7 +5703,7 @@ pub fn cmdChangelist( file.pkg = try Package.create(gpa, null, file.sub_file_path); defer file.pkg.destroy(gpa); - const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0); + const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); const amt = try f.readAll(source); if (amt != stat.size) return error.UnexpectedEndOfFile; @@ -5739,7 +5739,7 @@ pub fn cmdChangelist( if (new_stat.size > max_src_size) return error.FileTooBig; - const new_source = try arena.allocSentinel(u8, @intCast(usize, new_stat.size), 0); + const new_source = try arena.allocSentinel(u8, @as(usize, @intCast(new_stat.size)), 0); const new_amt = try new_f.readAll(new_source); if (new_amt != new_stat.size) return error.UnexpectedEndOfFile; diff --git a/src/objcopy.zig b/src/objcopy.zig index f89af1737c97..887396ee1860 100644 --- a/src/objcopy.zig +++ b/src/objcopy.zig @@ -345,7 +345,7 @@ const BinaryElfOutput = struct { const shstrtab_shdr = (try section_headers.next()).?; - const buffer = try allocator.alloc(u8, @intCast(usize, shstrtab_shdr.sh_size)); + const buffer = try allocator.alloc(u8, @as(usize, @intCast(shstrtab_shdr.sh_size))); errdefer allocator.free(buffer); const num_read = try elf_file.preadAll(buffer, shstrtab_shdr.sh_offset); @@ -363,11 +363,11 @@ const BinaryElfOutput = struct { 
newSection.binaryOffset = 0; newSection.elfOffset = section.sh_offset; - newSection.fileSize = @intCast(usize, section.sh_size); + newSection.fileSize = @as(usize, @intCast(section.sh_size)); newSection.segment = null; newSection.name = if (self.shstrtab) |shstrtab| - std.mem.span(@ptrCast([*:0]const u8, &shstrtab[section.sh_name])) + std.mem.span(@as([*:0]const u8, @ptrCast(&shstrtab[section.sh_name]))) else null; @@ -382,7 +382,7 @@ const BinaryElfOutput = struct { newSegment.physicalAddress = if (phdr.p_paddr != 0) phdr.p_paddr else phdr.p_vaddr; newSegment.virtualAddress = phdr.p_vaddr; - newSegment.fileSize = @intCast(usize, phdr.p_filesz); + newSegment.fileSize = @as(usize, @intCast(phdr.p_filesz)); newSegment.elfOffset = phdr.p_offset; newSegment.binaryOffset = 0; newSegment.firstSection = null; @@ -478,8 +478,8 @@ const HexWriter = struct { const MAX_PAYLOAD_LEN: u8 = 16; fn addressParts(address: u16) [2]u8 { - const msb = @truncate(u8, address >> 8); - const lsb = @truncate(u8, address); + const msb = @as(u8, @truncate(address >> 8)); + const lsb = @as(u8, @truncate(address)); return [2]u8{ msb, lsb }; } @@ -508,14 +508,14 @@ const HexWriter = struct { fn Data(address: u32, data: []const u8) Record { return Record{ - .address = @intCast(u16, address % 0x10000), + .address = @as(u16, @intCast(address % 0x10000)), .payload = .{ .Data = data }, }; } fn Address(address: u32) Record { assert(address > 0xFFFF); - const segment = @intCast(u16, address / 0x10000); + const segment = @as(u16, @intCast(address / 0x10000)); if (address > 0xFFFFF) { return Record{ .address = 0, @@ -540,7 +540,7 @@ const HexWriter = struct { fn checksum(self: Record) u8 { const payload_bytes = self.getPayloadBytes(); - var sum: u8 = @intCast(u8, payload_bytes.len); + var sum: u8 = @as(u8, @intCast(payload_bytes.len)); const parts = addressParts(self.address); sum +%= parts[0]; sum +%= parts[1]; @@ -560,7 +560,7 @@ const HexWriter = struct { assert(payload_bytes.len <= MAX_PAYLOAD_LEN); 
const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3s}{4X:0>2}" ++ linesep, .{ - @intCast(u8, payload_bytes.len), + @as(u8, @intCast(payload_bytes.len)), self.address, @intFromEnum(self.payload), std.fmt.fmtSliceHexUpper(payload_bytes), @@ -574,10 +574,10 @@ const HexWriter = struct { var buf: [MAX_PAYLOAD_LEN]u8 = undefined; var bytes_read: usize = 0; while (bytes_read < segment.fileSize) { - const row_address = @intCast(u32, segment.physicalAddress + bytes_read); + const row_address = @as(u32, @intCast(segment.physicalAddress + bytes_read)); const remaining = segment.fileSize - bytes_read; - const to_read = @intCast(usize, @min(remaining, MAX_PAYLOAD_LEN)); + const to_read = @as(usize, @intCast(@min(remaining, MAX_PAYLOAD_LEN))); const did_read = try elf_file.preadAll(buf[0..to_read], segment.elfOffset + bytes_read); if (did_read < to_read) return error.UnexpectedEOF; @@ -593,7 +593,7 @@ const HexWriter = struct { try Record.Address(address).write(self.out_file); } try record.write(self.out_file); - self.prev_addr = @intCast(u32, record.address + data.len); + self.prev_addr = @as(u32, @intCast(record.address + data.len)); } fn writeEOF(self: HexWriter) File.WriteError!void { @@ -814,7 +814,7 @@ fn ElfFile(comptime is_64: bool) type { const need_strings = (idx == header.shstrndx); if (need_data or need_strings) { - const buffer = try allocator.alignedAlloc(u8, section_memory_align, @intCast(usize, section.section.sh_size)); + const buffer = try allocator.alignedAlloc(u8, section_memory_align, @as(usize, @intCast(section.section.sh_size))); const bytes_read = try in_file.preadAll(buffer, section.section.sh_offset); if (bytes_read != section.section.sh_size) return error.TRUNCATED_ELF; section.payload = buffer; @@ -831,7 +831,7 @@ fn ElfFile(comptime is_64: bool) type { } else null; if (section.section.sh_name != 0 and header.shstrndx != elf.SHN_UNDEF) - section.name = std.mem.span(@ptrCast([*:0]const u8, 
§ions[header.shstrndx].payload.?[section.section.sh_name])); + section.name = std.mem.span(@as([*:0]const u8, @ptrCast(§ions[header.shstrndx].payload.?[section.section.sh_name]))); const category_from_program: SectionCategory = if (section.segment != null) .exe else .debug; section.category = switch (section.section.sh_type) { @@ -935,7 +935,7 @@ fn ElfFile(comptime is_64: bool) type { const update = §ions_update[self.raw_elf_header.e_shstrndx]; const name: []const u8 = ".gnu_debuglink"; - const new_offset = @intCast(u32, strtab.payload.?.len); + const new_offset = @as(u32, @intCast(strtab.payload.?.len)); const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1); @memcpy(buf[0..new_offset], strtab.payload.?); @memcpy(buf[new_offset..][0..name.len], name); @@ -965,7 +965,7 @@ fn ElfFile(comptime is_64: bool) type { update.payload = payload; update.section = section.section; update.section.?.sh_addralign = @alignOf(Elf_Chdr); - update.section.?.sh_size = @intCast(Elf_OffSize, payload.len); + update.section.?.sh_size = @as(Elf_OffSize, @intCast(payload.len)); update.section.?.sh_flags |= elf.SHF_COMPRESSED; } } @@ -991,7 +991,7 @@ fn ElfFile(comptime is_64: bool) type { const data = std.mem.sliceAsBytes(self.program_segments); assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum); cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_phoff } }); - eof_offset = updated_elf_header.e_phoff + @intCast(Elf_OffSize, data.len); + eof_offset = updated_elf_header.e_phoff + @as(Elf_OffSize, @intCast(data.len)); } // update sections and queue payload writes @@ -1032,7 +1032,7 @@ fn ElfFile(comptime is_64: bool) type { dest.sh_info = sections_update[src.sh_info].remap_idx; if (payload) |data| - dest.sh_size = @intCast(Elf_OffSize, data.len); + dest.sh_size = @as(Elf_OffSize, @intCast(data.len)); const addralign = if (src.sh_addralign == 0 or dest.sh_type == 
elf.SHT_NOBITS) 1 else src.sh_addralign; dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign); @@ -1056,7 +1056,7 @@ fn ElfFile(comptime is_64: bool) type { const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len); @memcpy(data, src_data); - const defs = @ptrCast([*]Elf_Verdef, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Verdef)]; + const defs = @as([*]Elf_Verdef, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Verdef)]; for (defs) |*def| { if (def.vd_ndx != elf.SHN_UNDEF) def.vd_ndx = sections_update[src.sh_info].remap_idx; @@ -1068,7 +1068,7 @@ fn ElfFile(comptime is_64: bool) type { const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len); @memcpy(data, src_data); - const syms = @ptrCast([*]Elf_Sym, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Sym)]; + const syms = @as([*]Elf_Sym, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Sym)]; for (syms) |*sym| { if (sym.st_shndx != elf.SHN_UNDEF and sym.st_shndx < elf.SHN_LORESERVE) sym.st_shndx = sections_update[sym.st_shndx].remap_idx; @@ -1110,7 +1110,7 @@ fn ElfFile(comptime is_64: bool) type { .sh_flags = 0, .sh_addr = 0, .sh_offset = eof_offset, - .sh_size = @intCast(Elf_OffSize, payload.len), + .sh_size = @as(Elf_OffSize, @intCast(payload.len)), .sh_link = elf.SHN_UNDEF, .sh_info = elf.SHN_UNDEF, .sh_addralign = 4, @@ -1119,7 +1119,7 @@ fn ElfFile(comptime is_64: bool) type { dest_section_idx += 1; cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = payload, .out_offset = eof_offset } }); - eof_offset += @intCast(Elf_OffSize, payload.len); + eof_offset += @as(Elf_OffSize, @intCast(payload.len)); } assert(dest_section_idx == new_shnum); @@ -1232,7 +1232,7 @@ const ElfFileHelper = struct { fused_cmd = null; } if (data.out_offset > offset) { - consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, data.out_offset - offset)], .out_offset = offset } }); + 
consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(data.out_offset - offset))], .out_offset = offset } }); } consolidated.appendAssumeCapacity(cmd); offset = data.out_offset + data.data.len; @@ -1249,7 +1249,7 @@ const ElfFileHelper = struct { } else { consolidated.appendAssumeCapacity(prev); if (range.out_offset > offset) { - consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, range.out_offset - offset)], .out_offset = offset } }); + consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(range.out_offset - offset))], .out_offset = offset } }); } fused_cmd = cmd; } @@ -1286,7 +1286,7 @@ const ElfFileHelper = struct { var section_reader = std.io.limitedReader(in_file.reader(), size); // allocate as large as decompressed data. if the compression doesn't fit, keep the data uncompressed. - const compressed_data = try allocator.alignedAlloc(u8, 8, @intCast(usize, size)); + const compressed_data = try allocator.alignedAlloc(u8, 8, @as(usize, @intCast(size))); var compressed_stream = std.io.fixedBufferStream(compressed_data); try compressed_stream.writer().writeAll(prefix); @@ -1317,7 +1317,7 @@ const ElfFileHelper = struct { }; } - const compressed_len = @intCast(usize, compressed_stream.getPos() catch unreachable); + const compressed_len = @as(usize, @intCast(compressed_stream.getPos() catch unreachable)); const data = allocator.realloc(compressed_data, compressed_len) catch compressed_data; return data[0..compressed_len]; } diff --git a/src/print_air.zig b/src/print_air.zig index d73ec308917f..4ae83271a188 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -91,7 +91,7 @@ const Writer = struct { fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void { for (w.air.instructions.items(.tag), 0..) 
|tag, i| { if (tag != .interned) continue; - const inst = @intCast(Air.Inst.Index, i); + const inst = @as(Air.Inst.Index, @intCast(i)); try w.writeInst(s, inst); try s.writeByte('\n'); } @@ -424,8 +424,8 @@ const Writer = struct { const mod = w.module; const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const vector_ty = w.air.getRefType(ty_pl.ty); - const len = @intCast(usize, vector_ty.arrayLen(mod)); - const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]); + const len = @as(usize, @intCast(vector_ty.arrayLen(mod))); + const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[ty_pl.payload..][0..len])); try w.writeType(s, vector_ty); try s.writeAll(", ["); @@ -607,8 +607,8 @@ const Writer = struct { fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.Asm, ty_pl.payload); - const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; - const clobbers_len = @truncate(u31, extra.data.flags); + const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; + const clobbers_len = @as(u31, @truncate(extra.data.flags)); var extra_i: usize = extra.end; var op_index: usize = 0; @@ -619,9 +619,9 @@ const Writer = struct { try s.writeAll(", volatile"); } - const outputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.outputs_len])); extra_i += outputs.len; - const inputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.inputs_len])); extra_i += inputs.len; for (outputs) |output| { @@ -699,7 +699,7 @@ const Writer = struct { fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = 
w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Call, pl_op.payload); - const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]); + const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra.end..][0..extra.data.args_len])); try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", ["); for (args, 0..) |arg, i| { @@ -855,7 +855,7 @@ const Writer = struct { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = w.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]); + const items = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[case.end..][0..case.data.items_len])); const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; @@ -934,13 +934,13 @@ const Writer = struct { const small_tomb_bits = Liveness.bpi - 1; const dies = if (w.liveness) |liveness| blk: { if (op_index < small_tomb_bits) - break :blk liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index)); + break :blk liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(op_index))); var extra_index = liveness.special.get(inst).?; var tomb_op_index: usize = small_tomb_bits; while (true) { const bits = liveness.extra[extra_index]; if (op_index < tomb_op_index + 31) { - break :blk @truncate(u1, bits >> @intCast(u5, op_index - tomb_op_index)) != 0; + break :blk @as(u1, @truncate(bits >> @as(u5, @intCast(op_index - tomb_op_index)))) != 0; } if ((bits >> 31) != 0) break :blk false; extra_index += 1; diff --git a/src/print_targets.zig b/src/print_targets.zig index ea4e30ae583f..62e1d3b1581f 100644 --- a/src/print_targets.zig +++ b/src/print_targets.zig @@ -100,7 +100,7 @@ pub fn cmdTargets( try jws.objectField(model.name); try jws.beginArray(); for (arch.allFeaturesList(), 0..) 
|feature, i_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize)); if (model.features.isEnabled(index)) { try jws.arrayElem(); try jws.emitString(feature.name); @@ -147,7 +147,7 @@ pub fn cmdTargets( try jws.objectField("features"); try jws.beginArray(); for (native_target.cpu.arch.allFeaturesList(), 0..) |feature, i_usize| { - const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize); + const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize)); if (cpu.features.isEnabled(index)) { try jws.arrayElem(); try jws.emitString(feature.name); diff --git a/src/print_zir.zig b/src/print_zir.zig index 472461cd0481..42a9abf401c5 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -131,7 +131,7 @@ const Writer = struct { recurse_blocks: bool, fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index { - return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node)); + return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(self.parent_decl_node)))); } fn writeInstToStream( @@ -542,7 +542,7 @@ const Writer = struct { } fn writeExtNode(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); + const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand))); try stream.writeAll(")) "); try self.writeSrc(stream, src); } @@ -631,25 +631,25 @@ const Writer = struct { var extra_index = extra.end; if (inst_data.flags.has_sentinel) { try stream.writeAll(", "); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); extra_index += 1; } if (inst_data.flags.has_align) { try stream.writeAll(", align("); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, 
@as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); extra_index += 1; if (inst_data.flags.has_bit_range) { const bit_start = extra_index + @intFromBool(inst_data.flags.has_addrspace); try stream.writeAll(":"); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start]))); try stream.writeAll(":"); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start + 1])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start + 1]))); } try stream.writeAll(")"); } if (inst_data.flags.has_addrspace) { try stream.writeAll(", addrspace("); - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index])); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]))); try stream.writeAll(")"); } try stream.writeAll(") "); @@ -691,7 +691,7 @@ const Writer = struct { const src = inst_data.src(); const number = extra.get(); // TODO improve std.format to be able to print f128 values - try stream.print("{d}) ", .{@floatCast(f64, number)}); + try stream.print("{d}) ", .{@as(f64, @floatCast(number))}); try self.writeSrc(stream, src); } @@ -964,7 +964,7 @@ const Writer = struct { } fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); if (flags.ptr_cast) try stream.writeAll("ptr_cast, "); @@ -980,7 +980,7 @@ const Writer = struct { } fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, 
extended.small)); + const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small)))); const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); if (flags.const_cast) try stream.writeAll("const_cast, "); @@ -1103,14 +1103,14 @@ const Writer = struct { ) !void { const extra = self.code.extraData(Zir.Inst.Asm, extended.operand); const src = LazySrcLoc.nodeOffset(extra.data.src_node); - const outputs_len = @truncate(u5, extended.small); - const inputs_len = @truncate(u5, extended.small >> 5); - const clobbers_len = @truncate(u5, extended.small >> 10); - const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs_len = @as(u5, @truncate(extended.small)); + const inputs_len = @as(u5, @truncate(extended.small >> 5)); + const clobbers_len = @as(u5, @truncate(extended.small >> 10)); + const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0; try self.writeFlag(stream, "volatile, ", is_volatile); if (tmpl_is_expr) { - try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, extra.data.asm_source)); + try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source))); try stream.writeAll(", "); } else { const asm_source = self.code.nullTerminatedString(extra.data.asm_source); @@ -1126,7 +1126,7 @@ const Writer = struct { const output = self.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; - const is_type = @truncate(u1, output_type_bits) != 0; + const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; const name = self.code.nullTerminatedString(output.data.name); @@ -1205,7 +1205,7 @@ const Writer = struct { if (extra.data.flags.ensure_result_used) { try stream.writeAll("nodiscard "); } - try stream.print(".{s}, ", .{@tagName(@enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier))}); + try stream.print(".{s}, ", .{@tagName(@as(std.builtin.CallModifier, 
@enumFromInt(extra.data.flags.packed_modifier)))}); switch (kind) { .direct => try self.writeInstRef(stream, extra.data.callee), .field => { @@ -1280,12 +1280,12 @@ const Writer = struct { } fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); + const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -1313,7 +1313,7 @@ const Writer = struct { extra_index += 1; try stream.writeAll("Packed("); if (backing_int_body_len == 0) { - const backing_int_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try self.writeInstRef(stream, backing_int_ref); } else { @@ -1369,13 +1369,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_default = @truncate(u1, cur_bit_bag) != 0; + const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_comptime = @truncate(u1, cur_bit_bag) != 0; + const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_type_body = @truncate(u1, cur_bit_bag) != 0; + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; var field_name: u32 = 0; @@ -1395,7 +1395,7 @@ const Writer = struct { if (has_type_body) { fields[field_i].type_len = self.code.extra[extra_index]; } else { - fields[field_i].type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + 
fields[field_i].type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); } extra_index += 1; @@ -1469,18 +1469,18 @@ const Writer = struct { } fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); + const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -1557,13 +1557,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_type = @truncate(u1, cur_bit_bag) != 0; + const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_value = @truncate(u1, cur_bit_bag) != 0; + const has_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const unused = @truncate(u1, cur_bit_bag) != 0; + const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; @@ -1578,14 +1578,14 @@ const Writer = struct { try stream.print("{}", .{std.zig.fmtId(field_name)}); if (has_type) { - const field_type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const field_type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(": "); try self.writeInstRef(stream, field_type); } if (has_align) { - const align_ref = 
@enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" align("); @@ -1593,7 +1593,7 @@ const Writer = struct { try stream.writeAll(")"); } if (has_value) { - const default_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const default_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" = "); @@ -1621,13 +1621,13 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const is_pub = @truncate(u1, cur_bit_bag) != 0; + const is_pub = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const is_exported = @truncate(u1, cur_bit_bag) != 0; + const is_exported = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_align = @truncate(u1, cur_bit_bag) != 0; + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - const has_section_or_addrspace = @truncate(u1, cur_bit_bag) != 0; + const has_section_or_addrspace = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const sub_index = extra_index; @@ -1644,23 +1644,23 @@ const Writer = struct { extra_index += 1; const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { - const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const inst = @as(Zir.Inst.Ref, 
@enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :inst inst; }; const pub_str = if (is_pub) "pub " else ""; - const hash_bytes = @bitCast([16]u8, hash_u32s.*); + const hash_bytes = @as([16]u8, @bitCast(hash_u32s.*)); if (decl_name_index == 0) { try stream.writeByteNTimes(' ', self.indent); const name = if (is_exported) "usingnamespace" else "comptime"; @@ -1728,17 +1728,17 @@ const Writer = struct { } fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); + const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; const tag_type_ref = if (small.has_tag_type) blk: { - const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; @@ -1808,7 +1808,7 @@ const Writer = struct { cur_bit_bag = self.code.extra[bit_bag_index]; bit_bag_index += 1; } - const has_tag_value = @truncate(u1, cur_bit_bag) != 0; + const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]); @@ -1823,7 +1823,7 @@ const Writer = struct { try stream.print("{}", .{std.zig.fmtId(field_name)}); if (has_tag_value) { - const tag_value_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const tag_value_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; try stream.writeAll(" = "); @@ -1844,11 +1844,11 @@ const Writer = struct { stream: anytype, extended: Zir.Inst.Extended.InstData, ) !void { 
- const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); + const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src_node: ?i32 = if (small.has_src_node) blk: { - const src_node = @bitCast(i32, self.code.extra[extra_index]); + const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); extra_index += 1; break :blk src_node; } else null; @@ -1892,7 +1892,7 @@ const Writer = struct { try stream.writeAll("{\n"); self.indent += 2; - var extra_index = @intCast(u32, extra.end); + var extra_index = @as(u32, @intCast(extra.end)); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { const str_index = self.code.extra[extra_index]; @@ -1945,7 +1945,7 @@ const Writer = struct { else => break :else_prong, }; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); const capture_text = switch (info.capture) { .none => "", .by_val => "by_val ", @@ -1966,9 +1966,9 @@ const Writer = struct { const scalar_cases_len = extra.data.bits.scalar_cases_len; var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); extra_index += 1; const body = self.code.extra[extra_index..][0..info.body_len]; extra_index += info.body_len; @@ -1993,7 +1993,7 @@ const Writer = struct { extra_index += 1; const ranges_len = self.code.extra[extra_index]; extra_index += 1; - const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]); + 
const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index])); extra_index += 1; const items = self.code.refSlice(extra_index, items_len); extra_index += items_len; @@ -2014,9 +2014,9 @@ const Writer = struct { var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { - const item_first = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_first = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; - const item_last = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const item_last = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; if (range_i != 0 or items.len != 0) { @@ -2117,7 +2117,7 @@ const Writer = struct { ret_ty_ref = .void_type; }, 1 => { - ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; }, else => { @@ -2188,7 +2188,7 @@ const Writer = struct { align_body = self.code.extra[extra_index..][0..body_len]; extra_index += align_body.len; } else if (extra.data.bits.has_align_ref) { - align_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_addrspace_body) { @@ -2197,7 +2197,7 @@ const Writer = struct { addrspace_body = self.code.extra[extra_index..][0..body_len]; extra_index += addrspace_body.len; } else if (extra.data.bits.has_addrspace_ref) { - addrspace_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_section_body) { @@ -2206,7 +2206,7 @@ const Writer = struct { section_body = self.code.extra[extra_index..][0..body_len]; extra_index += section_body.len; } else if (extra.data.bits.has_section_ref) { - section_ref = @enumFromInt(Zir.Inst.Ref, 
self.code.extra[extra_index]); + section_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_cc_body) { @@ -2215,7 +2215,7 @@ const Writer = struct { cc_body = self.code.extra[extra_index..][0..body_len]; extra_index += cc_body.len; } else if (extra.data.bits.has_cc_ref) { - cc_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + cc_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } if (extra.data.bits.has_ret_ty_body) { @@ -2224,7 +2224,7 @@ const Writer = struct { ret_ty_body = self.code.extra[extra_index..][0..body_len]; extra_index += ret_ty_body.len; } else if (extra.data.bits.has_ret_ty_ref) { - ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; } @@ -2266,7 +2266,7 @@ const Writer = struct { fn writeVarExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const extra = self.code.extraData(Zir.Inst.ExtendedVar, extended.operand); - const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); + const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); try self.writeInstRef(stream, extra.data.var_type); @@ -2277,12 +2277,12 @@ const Writer = struct { try stream.print(", lib_name=\"{}\"", .{std.zig.fmtEscapes(lib_name)}); } const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: { - const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk align_inst; }; const init_inst: Zir.Inst.Ref = if (!small.has_init) .none else blk: { - const init_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const init_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk init_inst; }; @@ -2295,17 
+2295,17 @@ const Writer = struct { fn writeAllocExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const extra = self.code.extraData(Zir.Inst.AllocExtended, extended.operand); - const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small); + const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small)); const src = LazySrcLoc.nodeOffset(extra.data.src_node); var extra_index: usize = extra.end; const type_inst: Zir.Inst.Ref = if (!small.has_type) .none else blk: { - const type_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const type_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk type_inst; }; const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: { - const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]); + const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])); extra_index += 1; break :blk align_inst; }; @@ -2473,8 +2473,8 @@ const Writer = struct { try stream.writeAll(") "); if (body.len != 0) { try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{ - src_locs.lbrace_line + 1, @truncate(u16, src_locs.columns) + 1, - src_locs.rbrace_line + 1, @truncate(u16, src_locs.columns >> 16) + 1, + src_locs.lbrace_line + 1, @as(u16, @truncate(src_locs.columns)) + 1, + src_locs.rbrace_line + 1, @as(u16, @truncate(src_locs.columns >> 16)) + 1, }); } try self.writeSrc(stream, src); @@ -2507,7 +2507,7 @@ const Writer = struct { fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { const i = @intFromEnum(ref); - if (i < InternPool.static_len) return stream.print("@{}", .{@enumFromInt(InternPool.Index, i)}); + if (i < InternPool.static_len) return stream.print("@{}", .{@as(InternPool.Index, @enumFromInt(i))}); return self.writeInstIndex(stream, i - InternPool.static_len); } diff --git a/src/register_manager.zig b/src/register_manager.zig index f9e2daeab195..322f623eec43 
100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -427,13 +427,13 @@ const MockRegister3 = enum(u3) { pub fn id(reg: MockRegister3) u3 { return switch (@intFromEnum(reg)) { - 0...3 => @as(u3, @truncate(u2, @intFromEnum(reg))), + 0...3 => @as(u3, @as(u2, @truncate(@intFromEnum(reg)))), 4...7 => @intFromEnum(reg), }; } pub fn enc(reg: MockRegister3) u2 { - return @truncate(u2, @intFromEnum(reg)); + return @as(u2, @truncate(@intFromEnum(reg))); } const gp_regs = [_]MockRegister3{ .r0, .r1, .r2, .r3 }; diff --git a/src/tracy.zig b/src/tracy.zig index 580e29805a49..10f24100912b 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -132,7 +132,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn allocFn(ptr: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); const result = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr); if (result) |data| { if (len != 0) { @@ -149,7 +149,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn resizeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); if (self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr)) { if (name) |n| { freeNamed(buf.ptr, n); @@ -168,7 +168,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } fn freeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { - const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr)); + const self: *Self = @ptrCast(@alignCast(ptr)); self.parent_allocator.rawFree(buf, buf_align, ret_addr); // this condition is to handle free being called on an empty slice that was never even allocated // example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}` diff --git a/src/translate_c.zig 
b/src/translate_c.zig index 4078bd0f346c..6f208b34928b 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -467,7 +467,7 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void { const entity = it.deref(); switch (entity.getKind()) { .MacroDefinitionKind => { - const macro = @ptrCast(*clang.MacroDefinitionRecord, entity); + const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity)); const raw_name = macro.getName_getNameStart(); const name = try c.str(raw_name); @@ -481,13 +481,13 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void { } fn declVisitorNamesOnlyC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool { - const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context)); + const c: *Context = @ptrCast(@alignCast(context)); declVisitorNamesOnly(c, decl) catch return false; return true; } fn declVisitorC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool { - const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context)); + const c: *Context = @ptrCast(@alignCast(context)); declVisitor(c, decl) catch return false; return true; } @@ -499,37 +499,37 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void { // Check for typedefs with unnamed enum/record child types. 
if (decl.getKind() == .Typedef) { - const typedef_decl = @ptrCast(*const clang.TypedefNameDecl, decl); + const typedef_decl = @as(*const clang.TypedefNameDecl, @ptrCast(decl)); var child_ty = typedef_decl.getUnderlyingType().getTypePtr(); const addr: usize = while (true) switch (child_ty.getTypeClass()) { .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, child_ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(child_ty)); const enum_decl = enum_ty.getDecl(); // check if this decl is unnamed - if (@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()[0] != 0) return; + if (@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin()[0] != 0) return; break @intFromPtr(enum_decl.getCanonicalDecl()); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, child_ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(child_ty)); const record_decl = record_ty.getDecl(); // check if this decl is unnamed - if (@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()[0] != 0) return; + if (@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin()[0] != 0) return; break @intFromPtr(record_decl.getCanonicalDecl()); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, child_ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(child_ty)); child_ty = elaborated_ty.getNamedType().getTypePtr(); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, child_ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(child_ty)); child_ty = decayed_ty.getDecayedType().getTypePtr(); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, child_ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(child_ty)); child_ty = attributed_ty.getEquivalentType().getTypePtr(); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, child_ty); + const 
macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(child_ty)); child_ty = macroqualified_ty.getModifiedType().getTypePtr(); }, else => return, @@ -552,25 +552,25 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void { fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void { switch (decl.getKind()) { .Function => { - return visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl)); + return visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, .Typedef => { - try transTypeDef(c, &c.global_scope.base, @ptrCast(*const clang.TypedefNameDecl, decl)); + try transTypeDef(c, &c.global_scope.base, @as(*const clang.TypedefNameDecl, @ptrCast(decl))); }, .Enum => { - try transEnumDecl(c, &c.global_scope.base, @ptrCast(*const clang.EnumDecl, decl)); + try transEnumDecl(c, &c.global_scope.base, @as(*const clang.EnumDecl, @ptrCast(decl))); }, .Record => { - try transRecordDecl(c, &c.global_scope.base, @ptrCast(*const clang.RecordDecl, decl)); + try transRecordDecl(c, &c.global_scope.base, @as(*const clang.RecordDecl, @ptrCast(decl))); }, .Var => { - return visitVarDecl(c, @ptrCast(*const clang.VarDecl, decl), null); + return visitVarDecl(c, @as(*const clang.VarDecl, @ptrCast(decl)), null); }, .Empty => { // Do nothing }, .FileScopeAsm => { - try transFileScopeAsm(c, &c.global_scope.base, @ptrCast(*const clang.FileScopeAsmDecl, decl)); + try transFileScopeAsm(c, &c.global_scope.base, @as(*const clang.FileScopeAsmDecl, @ptrCast(decl))); }, else => { const decl_name = try c.str(decl.getDeclKindName()); @@ -595,7 +595,7 @@ fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.Fi } fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { - const fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin()); + const fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin()); if (c.global_scope.sym_table.contains(fn_name)) return; 
// Avoid processing this decl twice @@ -630,22 +630,22 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { switch (fn_type.getTypeClass()) { .Attributed => { - const attr_type = @ptrCast(*const clang.AttributedType, fn_type); + const attr_type = @as(*const clang.AttributedType, @ptrCast(fn_type)); fn_qt = attr_type.getEquivalentType(); }, .Paren => { - const paren_type = @ptrCast(*const clang.ParenType, fn_type); + const paren_type = @as(*const clang.ParenType, @ptrCast(fn_type)); fn_qt = paren_type.getInnerType(); }, else => break fn_type, } }; - const fn_ty = @ptrCast(*const clang.FunctionType, fn_type); + const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_type)); const return_qt = fn_ty.getReturnType(); const proto_node = switch (fn_type.getTypeClass()) { .FunctionProto => blk: { - const fn_proto_type = @ptrCast(*const clang.FunctionProtoType, fn_type); + const fn_proto_type = @as(*const clang.FunctionProtoType, @ptrCast(fn_type)); if (has_body and fn_proto_type.isVariadic()) { decl_ctx.has_body = false; decl_ctx.storage_class = .Extern; @@ -661,7 +661,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { }; }, .FunctionNoProto => blk: { - const fn_no_proto_type = @ptrCast(*const clang.FunctionType, fn_type); + const fn_no_proto_type = @as(*const clang.FunctionType, @ptrCast(fn_type)); break :blk transFnNoProto(c, fn_no_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) { error.UnsupportedType => { return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{}); @@ -714,7 +714,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { param_id += 1; } - const casted_body = @ptrCast(*const clang.CompoundStmt, body_stmt); + const casted_body = @as(*const clang.CompoundStmt, @ptrCast(body_stmt)); transCompoundStmtInline(c, casted_body, &block_scope) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.UnsupportedTranslation, @@ 
-788,7 +788,7 @@ fn stringLiteralToCharStar(c: *Context, str: Node) Error!Node { /// if mangled_name is not null, this var decl was declared in a block scope. fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]const u8) Error!void { - const var_name = mangled_name orelse try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin()); + const var_name = mangled_name orelse try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin()); if (c.global_scope.sym_table.contains(var_name)) return; // Avoid processing this decl twice @@ -830,7 +830,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co if (has_init) trans_init: { if (decl_init) |expr| { const node_or_error = if (expr.getStmtClass() == .StringLiteralClass) - transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) + transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node) else transExprCoercing(c, scope, expr, .used); init_node = node_or_error catch |err| switch (err) { @@ -918,7 +918,7 @@ fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNa const toplevel = scope.id == .root; const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined; - var name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin()); + var name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin()); try c.typedefs.put(c.gpa, name, {}); if (builtin_typedef_map.get(name)) |builtin| { @@ -981,7 +981,7 @@ fn buildFlexibleArrayFn( .is_noalias = false, }; - const array_type = @ptrCast(*const clang.ArrayType, field_qt.getTypePtr()); + const array_type = @as(*const clang.ArrayType, @ptrCast(field_qt.getTypePtr())); const element_qt = array_type.getElementType(); const element_type = try transQualType(c, scope, element_qt, field_decl.getLocation()); @@ -1077,7 
+1077,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD var is_union = false; var container_kind_name: []const u8 = undefined; - var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()); + var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin()); if (record_decl.isUnion()) { container_kind_name = "union"; @@ -1138,7 +1138,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD } var is_anon = false; - var field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); if (field_decl.isAnonymousStructOrUnion() or field_name.len == 0) { // Context.getMangle() is not used here because doing so causes unpredictable field names for anonymous fields. field_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{unnamed_field_count}); @@ -1167,7 +1167,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD }; const alignment = if (has_flexible_array and field_decl.getFieldIndex() == 0) - @intCast(c_uint, record_alignment) + @as(c_uint, @intCast(record_alignment)) else ClangAlignment.forField(c, field_decl, record_def).zigAlignment(); @@ -1224,7 +1224,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined; var is_unnamed = false; - var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()); + var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin()); var name = bare_name; if (c.unnamed_typedefs.get(@intFromPtr(enum_decl.getCanonicalDecl()))) |typedef_name| { bare_name = typedef_name; @@ -1244,13 +1244,13 @@ fn transEnumDecl(c: *Context, 
scope: *Scope, enum_decl: *const clang.EnumDecl) E const end_it = enum_def.enumerator_end(); while (it.neq(end_it)) : (it = it.next()) { const enum_const = it.deref(); - var enum_val_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin()); + var enum_val_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_const)).getName_bytes_begin()); if (!toplevel) { enum_val_name = try bs.makeMangledName(c, enum_val_name); } - const enum_const_qt = @ptrCast(*const clang.ValueDecl, enum_const).getType(); - const enum_const_loc = @ptrCast(*const clang.Decl, enum_const).getLocation(); + const enum_const_qt = @as(*const clang.ValueDecl, @ptrCast(enum_const)).getType(); + const enum_const_loc = @as(*const clang.Decl, @ptrCast(enum_const)).getLocation(); const enum_const_type_node: ?Node = transQualType(c, scope, enum_const_qt, enum_const_loc) catch |err| switch (err) { error.UnsupportedType => null, else => |e| return e, @@ -1325,77 +1325,77 @@ fn transStmt( ) TransError!Node { const sc = stmt.getStmtClass(); switch (sc) { - .BinaryOperatorClass => return transBinaryOperator(c, scope, @ptrCast(*const clang.BinaryOperator, stmt), result_used), - .CompoundStmtClass => return transCompoundStmt(c, scope, @ptrCast(*const clang.CompoundStmt, stmt)), - .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @ptrCast(*const clang.CStyleCastExpr, stmt), result_used), - .DeclStmtClass => return transDeclStmt(c, scope, @ptrCast(*const clang.DeclStmt, stmt)), - .DeclRefExprClass => return transDeclRefExpr(c, scope, @ptrCast(*const clang.DeclRefExpr, stmt)), - .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @ptrCast(*const clang.ImplicitCastExpr, stmt), result_used), - .IntegerLiteralClass => return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, stmt), result_used, .with_as), - .ReturnStmtClass => return transReturnStmt(c, scope, @ptrCast(*const clang.ReturnStmt, stmt)), - 
.StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used), + .BinaryOperatorClass => return transBinaryOperator(c, scope, @as(*const clang.BinaryOperator, @ptrCast(stmt)), result_used), + .CompoundStmtClass => return transCompoundStmt(c, scope, @as(*const clang.CompoundStmt, @ptrCast(stmt))), + .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @as(*const clang.CStyleCastExpr, @ptrCast(stmt)), result_used), + .DeclStmtClass => return transDeclStmt(c, scope, @as(*const clang.DeclStmt, @ptrCast(stmt))), + .DeclRefExprClass => return transDeclRefExpr(c, scope, @as(*const clang.DeclRefExpr, @ptrCast(stmt))), + .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @as(*const clang.ImplicitCastExpr, @ptrCast(stmt)), result_used), + .IntegerLiteralClass => return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(stmt)), result_used, .with_as), + .ReturnStmtClass => return transReturnStmt(c, scope, @as(*const clang.ReturnStmt, @ptrCast(stmt))), + .StringLiteralClass => return transStringLiteral(c, scope, @as(*const clang.StringLiteral, @ptrCast(stmt)), result_used), .ParenExprClass => { - const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used); + const expr = try transExpr(c, scope, @as(*const clang.ParenExpr, @ptrCast(stmt)).getSubExpr(), .used); return maybeSuppressResult(c, result_used, expr); }, - .InitListExprClass => return transInitListExpr(c, scope, @ptrCast(*const clang.InitListExpr, stmt), result_used), - .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt)), - .IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)), - .WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)), - .DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)), + .InitListExprClass => return 
transInitListExpr(c, scope, @as(*const clang.InitListExpr, @ptrCast(stmt)), result_used), + .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt))), + .IfStmtClass => return transIfStmt(c, scope, @as(*const clang.IfStmt, @ptrCast(stmt))), + .WhileStmtClass => return transWhileLoop(c, scope, @as(*const clang.WhileStmt, @ptrCast(stmt))), + .DoStmtClass => return transDoWhileLoop(c, scope, @as(*const clang.DoStmt, @ptrCast(stmt))), .NullStmtClass => { return Tag.empty_block.init(); }, .ContinueStmtClass => return Tag.@"continue".init(), .BreakStmtClass => return Tag.@"break".init(), - .ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)), - .FloatingLiteralClass => return transFloatingLiteral(c, @ptrCast(*const clang.FloatingLiteral, stmt), result_used), + .ForStmtClass => return transForLoop(c, scope, @as(*const clang.ForStmt, @ptrCast(stmt))), + .FloatingLiteralClass => return transFloatingLiteral(c, @as(*const clang.FloatingLiteral, @ptrCast(stmt)), result_used), .ConditionalOperatorClass => { - return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used); + return transConditionalOperator(c, scope, @as(*const clang.ConditionalOperator, @ptrCast(stmt)), result_used); }, .BinaryConditionalOperatorClass => { - return transBinaryConditionalOperator(c, scope, @ptrCast(*const clang.BinaryConditionalOperator, stmt), result_used); + return transBinaryConditionalOperator(c, scope, @as(*const clang.BinaryConditionalOperator, @ptrCast(stmt)), result_used); }, - .SwitchStmtClass => return transSwitch(c, scope, @ptrCast(*const clang.SwitchStmt, stmt)), + .SwitchStmtClass => return transSwitch(c, scope, @as(*const clang.SwitchStmt, @ptrCast(stmt))), .CaseStmtClass, .DefaultStmtClass => { return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO complex switch", .{}); }, - .ConstantExprClass => return transConstantExpr(c, scope, 
@ptrCast(*const clang.Expr, stmt), result_used), - .PredefinedExprClass => return transPredefinedExpr(c, scope, @ptrCast(*const clang.PredefinedExpr, stmt), result_used), - .CharacterLiteralClass => return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, stmt), result_used, .with_as), - .StmtExprClass => return transStmtExpr(c, scope, @ptrCast(*const clang.StmtExpr, stmt), result_used), - .MemberExprClass => return transMemberExpr(c, scope, @ptrCast(*const clang.MemberExpr, stmt), result_used), - .ArraySubscriptExprClass => return transArrayAccess(c, scope, @ptrCast(*const clang.ArraySubscriptExpr, stmt), result_used), - .CallExprClass => return transCallExpr(c, scope, @ptrCast(*const clang.CallExpr, stmt), result_used), - .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @ptrCast(*const clang.UnaryExprOrTypeTraitExpr, stmt), result_used), - .UnaryOperatorClass => return transUnaryOperator(c, scope, @ptrCast(*const clang.UnaryOperator, stmt), result_used), - .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @ptrCast(*const clang.CompoundAssignOperator, stmt), result_used), + .ConstantExprClass => return transConstantExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt)), result_used), + .PredefinedExprClass => return transPredefinedExpr(c, scope, @as(*const clang.PredefinedExpr, @ptrCast(stmt)), result_used), + .CharacterLiteralClass => return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(stmt)), result_used, .with_as), + .StmtExprClass => return transStmtExpr(c, scope, @as(*const clang.StmtExpr, @ptrCast(stmt)), result_used), + .MemberExprClass => return transMemberExpr(c, scope, @as(*const clang.MemberExpr, @ptrCast(stmt)), result_used), + .ArraySubscriptExprClass => return transArrayAccess(c, scope, @as(*const clang.ArraySubscriptExpr, @ptrCast(stmt)), result_used), + .CallExprClass => return transCallExpr(c, scope, @as(*const clang.CallExpr, @ptrCast(stmt)), 
result_used), + .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @as(*const clang.UnaryExprOrTypeTraitExpr, @ptrCast(stmt)), result_used), + .UnaryOperatorClass => return transUnaryOperator(c, scope, @as(*const clang.UnaryOperator, @ptrCast(stmt)), result_used), + .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @as(*const clang.CompoundAssignOperator, @ptrCast(stmt)), result_used), .OpaqueValueExprClass => { - const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?; + const source_expr = @as(*const clang.OpaqueValueExpr, @ptrCast(stmt)).getSourceExpr().?; const expr = try transExpr(c, scope, source_expr, .used); return maybeSuppressResult(c, result_used, expr); }, - .OffsetOfExprClass => return transOffsetOfExpr(c, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used), + .OffsetOfExprClass => return transOffsetOfExpr(c, @as(*const clang.OffsetOfExpr, @ptrCast(stmt)), result_used), .CompoundLiteralExprClass => { - const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt); + const compound_literal = @as(*const clang.CompoundLiteralExpr, @ptrCast(stmt)); return transExpr(c, scope, compound_literal.getInitializer(), result_used); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(stmt)); return transExpr(c, scope, gen_sel.getResultExpr(), result_used); }, .ConvertVectorExprClass => { - const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt); + const conv_vec = @as(*const clang.ConvertVectorExpr, @ptrCast(stmt)); const conv_vec_node = try transConvertVectorExpr(c, scope, conv_vec); return maybeSuppressResult(c, result_used, conv_vec_node); }, .ShuffleVectorExprClass => { - const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt); + const shuffle_vec_expr = @as(*const clang.ShuffleVectorExpr, @ptrCast(stmt)); const 
shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr); return maybeSuppressResult(c, result_used, shuffle_vec_node); }, .ChooseExprClass => { - const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt); + const choose_expr = @as(*const clang.ChooseExpr, @ptrCast(stmt)); return transExpr(c, scope, choose_expr.getChosenSubExpr(), result_used); }, // When adding new cases here, see comment for maybeBlockify() @@ -1421,21 +1421,21 @@ fn transConvertVectorExpr( scope: *Scope, expr: *const clang.ConvertVectorExpr, ) TransError!Node { - const base_stmt = @ptrCast(*const clang.Stmt, expr); + const base_stmt = @as(*const clang.Stmt, @ptrCast(expr)); var block_scope = try Scope.Block.init(c, scope, true); defer block_scope.deinit(); const src_expr = expr.getSrcExpr(); const src_type = qualTypeCanon(src_expr.getType()); - const src_vector_ty = @ptrCast(*const clang.VectorType, src_type); + const src_vector_ty = @as(*const clang.VectorType, @ptrCast(src_type)); const src_element_qt = src_vector_ty.getElementType(); const src_expr_node = try transExpr(c, &block_scope.base, src_expr, .used); const dst_qt = expr.getTypeSourceInfo_getType(); const dst_type_node = try transQualType(c, &block_scope.base, dst_qt, base_stmt.getBeginLoc()); - const dst_vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(dst_qt)); + const dst_vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(dst_qt))); const num_elements = dst_vector_ty.getNumElements(); const dst_element_qt = dst_vector_ty.getElementType(); @@ -1490,7 +1490,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE const init_list = try c.arena.alloc(Node, mask_len); for (init_list, 0..) 
|*init, i| { - const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used); + const index_expr = try transExprCoercing(c, scope, expr.getExpr(@as(c_uint, @intCast(i + 2))), .used); const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len }); init.* = converted_index; } @@ -1514,7 +1514,7 @@ fn transShuffleVectorExpr( scope: *Scope, expr: *const clang.ShuffleVectorExpr, ) TransError!Node { - const base_expr = @ptrCast(*const clang.Expr, expr); + const base_expr = @as(*const clang.Expr, @ptrCast(expr)); const num_subexprs = expr.getNumSubExprs(); if (num_subexprs < 3) return fail(c, error.UnsupportedTranslation, base_expr.getBeginLoc(), "ShuffleVector needs at least 1 index", .{}); @@ -1545,7 +1545,7 @@ fn transSimpleOffsetOfExpr(c: *Context, expr: *const clang.OffsetOfExpr) TransEr if (c.decl_table.get(@intFromPtr(record_decl.getCanonicalDecl()))) |type_name| { const type_node = try Tag.type.create(c.arena, type_name); - var raw_field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var raw_field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); const quoted_field_name = try std.fmt.allocPrint(c.arena, "\"{s}\"", .{raw_field_name}); const field_name_node = try Tag.string_literal.create(c.arena, quoted_field_name); @@ -1829,7 +1829,7 @@ fn transCStyleCastExprClass( stmt: *const clang.CStyleCastExpr, result_used: ResultUsed, ) TransError!Node { - const cast_expr = @ptrCast(*const clang.CastExpr, stmt); + const cast_expr = @as(*const clang.CastExpr, @ptrCast(stmt)); const sub_expr = stmt.getSubExpr(); const dst_type = stmt.getType(); const src_type = sub_expr.getType(); @@ -1838,7 +1838,7 @@ fn transCStyleCastExprClass( const cast_node = if (cast_expr.getCastKind() == .ToUnion) blk: { const field_decl = cast_expr.getTargetFieldForToUnionCast(dst_type, src_type).?; // C syntax error if target 
field is null - const field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + const field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); const union_ty = try transQualType(c, scope, dst_type, loc); @@ -1923,12 +1923,12 @@ fn transDeclStmtOne( ) TransError!void { switch (decl.getKind()) { .Var => { - const var_decl = @ptrCast(*const clang.VarDecl, decl); + const var_decl = @as(*const clang.VarDecl, @ptrCast(decl)); const decl_init = var_decl.getInit(); const loc = decl.getLocation(); const qual_type = var_decl.getTypeSourceInfo_getType(); - const name = try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin()); + const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin()); const mangled_name = try block_scope.makeMangledName(c, name); if (var_decl.getStorageClass() == .Extern) { @@ -1945,7 +1945,7 @@ fn transDeclStmtOne( var init_node = if (decl_init) |expr| if (expr.getStmtClass() == .StringLiteralClass) - try transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node) + try transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node) else try transExprCoercing(c, scope, expr, .used) else if (is_static_local) @@ -1980,7 +1980,7 @@ fn transDeclStmtOne( const cleanup_attr = var_decl.getCleanupAttribute(); if (cleanup_attr) |fn_decl| { - const cleanup_fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin()); + const cleanup_fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin()); const fn_id = try Tag.identifier.create(c.arena, cleanup_fn_name); const varname = try Tag.identifier.create(c.arena, mangled_name); @@ -1995,16 +1995,16 @@ fn transDeclStmtOne( } }, .Typedef => { - try transTypeDef(c, scope, @ptrCast(*const clang.TypedefNameDecl, decl)); + try transTypeDef(c, scope, @as(*const clang.TypedefNameDecl, 
@ptrCast(decl))); }, .Record => { - try transRecordDecl(c, scope, @ptrCast(*const clang.RecordDecl, decl)); + try transRecordDecl(c, scope, @as(*const clang.RecordDecl, @ptrCast(decl))); }, .Enum => { - try transEnumDecl(c, scope, @ptrCast(*const clang.EnumDecl, decl)); + try transEnumDecl(c, scope, @as(*const clang.EnumDecl, @ptrCast(decl))); }, .Function => { - try visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl)); + try visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, else => { const decl_name = try c.str(decl.getDeclKindName()); @@ -2030,15 +2030,15 @@ fn transDeclRefExpr( expr: *const clang.DeclRefExpr, ) TransError!Node { const value_decl = expr.getDecl(); - const name = try c.str(@ptrCast(*const clang.NamedDecl, value_decl).getName_bytes_begin()); + const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(value_decl)).getName_bytes_begin()); const mangled_name = scope.getAlias(name); - var ref_expr = if (cIsFunctionDeclRef(@ptrCast(*const clang.Expr, expr))) + var ref_expr = if (cIsFunctionDeclRef(@as(*const clang.Expr, @ptrCast(expr)))) try Tag.fn_identifier.create(c.arena, mangled_name) else try Tag.identifier.create(c.arena, mangled_name); - if (@ptrCast(*const clang.Decl, value_decl).getKind() == .Var) { - const var_decl = @ptrCast(*const clang.VarDecl, value_decl); + if (@as(*const clang.Decl, @ptrCast(value_decl)).getKind() == .Var) { + const var_decl = @as(*const clang.VarDecl, @ptrCast(value_decl)); if (var_decl.isStaticLocal()) { ref_expr = try Tag.field_access.create(c.arena, .{ .lhs = ref_expr, @@ -2057,7 +2057,7 @@ fn transImplicitCastExpr( result_used: ResultUsed, ) TransError!Node { const sub_expr = expr.getSubExpr(); - const dest_type = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); + const dest_type = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); const src_type = getExprQualType(c, sub_expr); switch (expr.getCastKind()) { .BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, 
.IntegralCast, .PointerToIntegral, .IntegralToPointer => { @@ -2111,7 +2111,7 @@ fn transImplicitCastExpr( else => |kind| return fail( c, error.UnsupportedTranslation, - @ptrCast(*const clang.Stmt, expr).getBeginLoc(), + @as(*const clang.Stmt, @ptrCast(expr)).getBeginLoc(), "unsupported CastKind {s}", .{@tagName(kind)}, ), @@ -2141,9 +2141,9 @@ fn transBoolExpr( expr: *const clang.Expr, used: ResultUsed, ) TransError!Node { - if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) { + if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) { var signum: c_int = undefined; - if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) { + if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) { return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "invalid integer literal", .{}); } const is_zero = signum == 0; @@ -2168,20 +2168,20 @@ fn exprIsBooleanType(expr: *const clang.Expr) bool { fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool { switch (expr.getStmtClass()) { .StringLiteralClass => { - const string_lit = @ptrCast(*const clang.StringLiteral, expr); + const string_lit = @as(*const clang.StringLiteral, @ptrCast(expr)); return string_lit.getCharByteWidth() == 1; }, .PredefinedExprClass => return true, .UnaryOperatorClass => { - const op_expr = @ptrCast(*const clang.UnaryOperator, expr).getSubExpr(); + const op_expr = @as(*const clang.UnaryOperator, @ptrCast(expr)).getSubExpr(); return exprIsNarrowStringLiteral(op_expr); }, .ParenExprClass => { - const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr(); + const op_expr = @as(*const clang.ParenExpr, @ptrCast(expr)).getSubExpr(); return exprIsNarrowStringLiteral(op_expr); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr)); return 
exprIsNarrowStringLiteral(gen_sel.getResultExpr()); }, else => return false, @@ -2190,11 +2190,11 @@ fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool { fn exprIsFlexibleArrayRef(c: *Context, expr: *const clang.Expr) bool { if (expr.getStmtClass() == .MemberExprClass) { - const member_expr = @ptrCast(*const clang.MemberExpr, expr); + const member_expr = @as(*const clang.MemberExpr, @ptrCast(expr)); const member_decl = member_expr.getMemberDecl(); - const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind(); + const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind(); if (decl_kind == .Field) { - const field_decl = @ptrCast(*const clang.FieldDecl, member_decl); + const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl)); return isFlexibleArrayFieldDecl(c, field_decl); } } @@ -2229,7 +2229,7 @@ fn finishBoolExpr( ) TransError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch (builtin_ty.getKind()) { .Bool => return node, @@ -2273,7 +2273,7 @@ fn finishBoolExpr( return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.null_literal.init() }); }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = typedef_decl.getUnderlyingType(); return finishBoolExpr(c, scope, loc, underlying_type.getTypePtr(), node, used); @@ -2283,7 +2283,7 @@ fn finishBoolExpr( return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() }); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); const named_type = elaborated_ty.getNamedType(); return finishBoolExpr(c, scope, loc, named_type.getTypePtr(), node, used); }, @@ -2325,7 
+2325,7 @@ fn transIntegerLiteral( // But the first step is to be correct, and the next step is to make the output more elegant. // @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, expr); + const expr_base = @as(*const clang.Expr, @ptrCast(expr)); const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()); const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt()); const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs }); @@ -2374,7 +2374,7 @@ fn transStringLiteral( const str_type = @tagName(stmt.getKind()); const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() }); - const expr_base = @ptrCast(*const clang.Expr, stmt); + const expr_base = @as(*const clang.Expr, @ptrCast(stmt)); const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc()); const lit_array = try transStringLiteralInitializer(c, stmt, array_type); const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array }); @@ -2451,11 +2451,11 @@ fn transStringLiteralInitializer( /// both operands resolve to addresses. The C standard requires that both operands /// point to elements of the same array object, but we do not verify that here. 
fn cIsPointerDiffExpr(stmt: *const clang.BinaryOperator) bool { - const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS()); - const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS()); + const lhs = @as(*const clang.Stmt, @ptrCast(stmt.getLHS())); + const rhs = @as(*const clang.Stmt, @ptrCast(stmt.getRHS())); return stmt.getOpcode() == .Sub and - qualTypeIsPtr(@ptrCast(*const clang.Expr, lhs).getType()) and - qualTypeIsPtr(@ptrCast(*const clang.Expr, rhs).getType()); + qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(lhs)).getType()) and + qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(rhs)).getType()); } fn cIsEnum(qt: clang.QualType) bool { @@ -2472,7 +2472,7 @@ fn cIsVector(qt: clang.QualType) bool { fn cIntTypeForEnum(enum_qt: clang.QualType) clang.QualType { assert(cIsEnum(enum_qt)); const ty = enum_qt.getCanonicalType().getTypePtr(); - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); return enum_decl.getIntegerType(); } @@ -2588,29 +2588,29 @@ fn transCCast( } fn transExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node { - return transStmt(c, scope, @ptrCast(*const clang.Stmt, expr), used); + return transStmt(c, scope, @as(*const clang.Stmt, @ptrCast(expr)), used); } /// Same as `transExpr` but with the knowledge that the operand will be type coerced, and therefore /// an `@as` would be redundant. This is used to prevent redundant `@as` in integer literals. 
fn transExprCoercing(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node { - switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) { + switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) { .IntegerLiteralClass => { - return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, expr), .used, .no_as); + return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(expr)), .used, .no_as); }, .CharacterLiteralClass => { - return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, expr), .used, .no_as); + return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(expr)), .used, .no_as); }, .UnaryOperatorClass => { - const un_expr = @ptrCast(*const clang.UnaryOperator, expr); + const un_expr = @as(*const clang.UnaryOperator, @ptrCast(expr)); if (un_expr.getOpcode() == .Extension) { return transExprCoercing(c, scope, un_expr.getSubExpr(), used); } }, .ImplicitCastExprClass => { - const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr); + const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr)); const sub_expr = cast_expr.getSubExpr(); - switch (@ptrCast(*const clang.Stmt, sub_expr).getStmtClass()) { + switch (@as(*const clang.Stmt, @ptrCast(sub_expr)).getStmtClass()) { .IntegerLiteralClass, .CharacterLiteralClass => switch (cast_expr.getCastKind()) { .IntegralToFloating => return transExprCoercing(c, scope, sub_expr, used), .IntegralCast => { @@ -2634,15 +2634,15 @@ fn literalFitsInType(c: *Context, expr: *const clang.Expr, qt: clang.QualType) b const is_signed = cIsSignedInteger(qt); const width_max_int = (@as(u64, 1) << math.lossyCast(u6, width - @intFromBool(is_signed))) - 1; - switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) { + switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) { .CharacterLiteralClass => { - const char_lit = @ptrCast(*const clang.CharacterLiteral, expr); + const char_lit = @as(*const 
clang.CharacterLiteral, @ptrCast(expr)); const val = char_lit.getValue(); // If the val is less than the max int then it fits. return val <= width_max_int; }, .IntegerLiteralClass => { - const int_lit = @ptrCast(*const clang.IntegerLiteral, expr); + const int_lit = @as(*const clang.IntegerLiteral, @ptrCast(expr)); var eval_result: clang.ExprEvalResult = undefined; if (!int_lit.EvaluateAsInt(&eval_result, c.clang_context)) { return false; @@ -2695,7 +2695,7 @@ fn transInitListExprRecord( // Generate the field assignment expression: // .field_name = expr - var raw_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin()); + var raw_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin()); if (field_decl.isAnonymousStructOrUnion()) { const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?; raw_name = try c.arena.dupe(u8, name); @@ -2736,8 +2736,8 @@ fn transInitListExprArray( const child_qt = arr_type.getElementType(); const child_type = try transQualType(c, scope, child_qt, loc); const init_count = expr.getNumInits(); - assert(@ptrCast(*const clang.Type, arr_type).isConstantArrayType()); - const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, arr_type); + assert(@as(*const clang.Type, @ptrCast(arr_type)).isConstantArrayType()); + const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(arr_type)); const size_ap_int = const_arr_ty.getSize(); const all_count = size_ap_int.getLimitedValue(usize); const leftover_count = all_count - init_count; @@ -2757,7 +2757,7 @@ fn transInitListExprArray( const init_list = try c.arena.alloc(Node, init_count); for (init_list, 0..) 
|*init, i| { - const elem_expr = expr.getInit(@intCast(c_uint, i)); + const elem_expr = expr.getInit(@as(c_uint, @intCast(i))); init.* = try transExprCoercing(c, scope, elem_expr, .used); } const init_node = try Tag.array_init.create(c.arena, .{ @@ -2791,8 +2791,8 @@ fn transInitListExprVector( loc: clang.SourceLocation, expr: *const clang.InitListExpr, ) TransError!Node { - const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); - const vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(qt)); + const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); + const vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(qt))); const init_count = expr.getNumInits(); const num_elements = vector_ty.getNumElements(); @@ -2822,7 +2822,7 @@ fn transInitListExprVector( var i: usize = 0; while (i < init_count) : (i += 1) { const mangled_name = try block_scope.makeMangledName(c, "tmp"); - const init_expr = expr.getInit(@intCast(c_uint, i)); + const init_expr = expr.getInit(@as(c_uint, @intCast(i))); const tmp_decl_node = try Tag.var_simple.create(c.arena, .{ .name = mangled_name, .init = try transExpr(c, &block_scope.base, init_expr, .used), @@ -2860,9 +2860,9 @@ fn transInitListExpr( expr: *const clang.InitListExpr, used: ResultUsed, ) TransError!Node { - const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr)); + const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr))); var qual_type = qt.getTypePtr(); - const source_loc = @ptrCast(*const clang.Expr, expr).getBeginLoc(); + const source_loc = @as(*const clang.Expr, @ptrCast(expr)).getBeginLoc(); if (qualTypeWasDemotedToOpaque(c, qt)) { return fail(c, error.UnsupportedTranslation, source_loc, "cannot initialize opaque type", .{}); @@ -2900,7 +2900,7 @@ fn transZeroInitExpr( ) TransError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch 
(builtin_ty.getKind()) { .Bool => return Tag.false_literal.init(), .Char_U, @@ -2929,7 +2929,7 @@ fn transZeroInitExpr( }, .Pointer => return Tag.null_literal.init(), .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); return transZeroInitExpr( c, @@ -2998,7 +2998,7 @@ fn transIfStmt( }, }; defer cond_scope.deinit(); - const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond()); + const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond())); const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used); const then_stmt = stmt.getThen(); @@ -3034,7 +3034,7 @@ fn transWhileLoop( }, }; defer cond_scope.deinit(); - const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond()); + const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond())); const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used); var loop_scope = Scope{ @@ -3063,7 +3063,7 @@ fn transDoWhileLoop( }, }; defer cond_scope.deinit(); - const cond = try transBoolExpr(c, &cond_scope.base, @ptrCast(*const clang.Expr, stmt.getCond()), .used); + const cond = try transBoolExpr(c, &cond_scope.base, @as(*const clang.Expr, @ptrCast(stmt.getCond())), .used); const if_not_break = switch (cond.tag()) { .true_literal => { const body_node = try maybeBlockify(c, scope, stmt.getBody()); @@ -3184,7 +3184,7 @@ fn transSwitch( const body = stmt.getBody(); assert(body.getStmtClass() == .CompoundStmtClass); - const compound_stmt = @ptrCast(*const clang.CompoundStmt, body); + const compound_stmt = @as(*const clang.CompoundStmt, @ptrCast(body)); var it = compound_stmt.body_begin(); const end_it = compound_stmt.body_end(); // Iterate over switch body and collect all cases. 
@@ -3211,12 +3211,12 @@ fn transSwitch( }, .DefaultStmtClass => { has_default = true; - const default_stmt = @ptrCast(*const clang.DefaultStmt, it[0]); + const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(it[0])); var sub = default_stmt.getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; @@ -3255,11 +3255,11 @@ fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *st .DefaultStmtClass => { seen_default = true; items.items.len = 0; - const default_stmt = @ptrCast(*const clang.DefaultStmt, sub); + const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(sub)); sub = default_stmt.getSubStmt(); }, .CaseStmtClass => { - const case_stmt = @ptrCast(*const clang.CaseStmt, sub); + const case_stmt = @as(*const clang.CaseStmt, @ptrCast(sub)); if (seen_default) { items.items.len = 0; @@ -3326,10 +3326,10 @@ fn transSwitchProngStmtInline( return; }, .CaseStmtClass => { - var sub = @ptrCast(*const clang.CaseStmt, it[0]).getSubStmt(); + var sub = @as(*const clang.CaseStmt, @ptrCast(it[0])).getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; const result = try transStmt(c, &block.base, sub, .unused); @@ -3340,10 +3340,10 @@ fn transSwitchProngStmtInline( } }, .DefaultStmtClass => { - var sub = @ptrCast(*const clang.DefaultStmt, it[0]).getSubStmt(); + 
var sub = @as(*const clang.DefaultStmt, @ptrCast(it[0])).getSubStmt(); while (true) switch (sub.getStmtClass()) { - .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(), - .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(), + .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(), + .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(), else => break, }; const result = try transStmt(c, &block.base, sub, .unused); @@ -3354,7 +3354,7 @@ fn transSwitchProngStmtInline( } }, .CompoundStmtClass => { - const result = try transCompoundStmt(c, &block.base, @ptrCast(*const clang.CompoundStmt, it[0])); + const result = try transCompoundStmt(c, &block.base, @as(*const clang.CompoundStmt, @ptrCast(it[0]))); try block.statements.append(result); if (result.isNoreturn(true)) { return; @@ -3381,7 +3381,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: .Int => { // See comment in `transIntegerLiteral` for why this code is here. // @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, expr); + const expr_base = @as(*const clang.Expr, @ptrCast(expr)); const as_node = try Tag.as.create(c.arena, .{ .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = try transCreateNodeAPInt(c, result.Val.getInt()), @@ -3400,7 +3400,7 @@ fn transPredefinedExpr(c: *Context, scope: *Scope, expr: *const clang.Predefined fn transCreateCharLitNode(c: *Context, narrow: bool, val: u32) TransError!Node { return Tag.char_literal.create(c.arena, if (narrow) - try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@intCast(u8, val)})}) + try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@as(u8, @intCast(val))})}) else try std.fmt.allocPrint(c.arena, "'\\u{{{x}}}'", .{val})); } @@ -3427,7 +3427,7 @@ fn transCharLiteral( } // See comment in `transIntegerLiteral` for why this code is here. 
// @as(T, x) - const expr_base = @ptrCast(*const clang.Expr, stmt); + const expr_base = @as(*const clang.Expr, @ptrCast(stmt)); const as_node = try Tag.as.create(c.arena, .{ .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()), .rhs = int_lit_node, @@ -3469,22 +3469,22 @@ fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, re const member_decl = stmt.getMemberDecl(); const name = blk: { - const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind(); + const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind(); // If we're referring to a anonymous struct/enum find the bogus name // we've assigned to it during the RecordDecl translation if (decl_kind == .Field) { - const field_decl = @ptrCast(*const clang.FieldDecl, member_decl); + const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl)); if (field_decl.isAnonymousStructOrUnion()) { const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?; break :blk try c.arena.dupe(u8, name); } } - const decl = @ptrCast(*const clang.NamedDecl, member_decl); + const decl = @as(*const clang.NamedDecl, @ptrCast(member_decl)); break :blk try c.str(decl.getName_bytes_begin()); }; var node = try Tag.field_access.create(c.arena, .{ .lhs = container_node, .field_name = name }); - if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) { + if (exprIsFlexibleArrayRef(c, @as(*const clang.Expr, @ptrCast(stmt)))) { node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} }); } return maybeSuppressResult(c, result_used, node); @@ -3582,8 +3582,8 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip // Unwrap the base statement if it's an array decayed to a bare pointer type // so that we index the array itself var unwrapped_base = base_stmt; - if (@ptrCast(*const clang.Stmt, base_stmt).getStmtClass() == .ImplicitCastExprClass) { - const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, 
base_stmt); + if (@as(*const clang.Stmt, @ptrCast(base_stmt)).getStmtClass() == .ImplicitCastExprClass) { + const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(base_stmt)); if (implicit_cast.getCastKind() == .ArrayToPointerDecay) { unwrapped_base = implicit_cast.getSubExpr(); @@ -3620,17 +3620,17 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip fn cIsFunctionDeclRef(expr: *const clang.Expr) bool { switch (expr.getStmtClass()) { .ParenExprClass => { - const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr(); + const op_expr = @as(*const clang.ParenExpr, @ptrCast(expr)).getSubExpr(); return cIsFunctionDeclRef(op_expr); }, .DeclRefExprClass => { - const decl_ref = @ptrCast(*const clang.DeclRefExpr, expr); + const decl_ref = @as(*const clang.DeclRefExpr, @ptrCast(expr)); const value_decl = decl_ref.getDecl(); const qt = value_decl.getType(); return qualTypeChildIsFnProto(qt); }, .ImplicitCastExprClass => { - const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, expr); + const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(expr)); const cast_kind = implicit_cast.getCastKind(); if (cast_kind == .BuiltinFnToFnPtr) return true; if (cast_kind == .FunctionToPointerDecay) { @@ -3639,12 +3639,12 @@ fn cIsFunctionDeclRef(expr: *const clang.Expr) bool { return false; }, .UnaryOperatorClass => { - const un_op = @ptrCast(*const clang.UnaryOperator, expr); + const un_op = @as(*const clang.UnaryOperator, @ptrCast(expr)); const opcode = un_op.getOpcode(); return (opcode == .AddrOf or opcode == .Deref) and cIsFunctionDeclRef(un_op.getSubExpr()); }, .GenericSelectionExprClass => { - const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr); + const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr)); return cIsFunctionDeclRef(gen_sel.getResultExpr()); }, else => return false, @@ -3679,11 +3679,11 @@ fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result .Proto 
=> |fn_proto| { const param_count = fn_proto.getNumParams(); if (i < param_count) { - const param_qt = fn_proto.getParamType(@intCast(c_uint, i)); + const param_qt = fn_proto.getParamType(@as(c_uint, @intCast(i))); if (isBoolRes(arg) and cIsNativeInt(param_qt)) { arg = try Tag.int_from_bool.create(c.arena, arg); } else if (arg.tag() == .string_literal and qualTypeIsCharStar(param_qt)) { - const loc = @ptrCast(*const clang.Stmt, stmt).getBeginLoc(); + const loc = @as(*const clang.Stmt, @ptrCast(stmt)).getBeginLoc(); const dst_type_node = try transQualType(c, scope, param_qt, loc); arg = try removeCVQualifiers(c, dst_type_node, arg); } @@ -3729,10 +3729,10 @@ fn qualTypeGetFnProto(qt: clang.QualType, is_ptr: *bool) ?ClangFunctionType { ty = child_qt.getTypePtr(); } if (ty.getTypeClass() == .FunctionProto) { - return ClangFunctionType{ .Proto = @ptrCast(*const clang.FunctionProtoType, ty) }; + return ClangFunctionType{ .Proto = @as(*const clang.FunctionProtoType, @ptrCast(ty)) }; } if (ty.getTypeClass() == .FunctionNoProto) { - return ClangFunctionType{ .NoProto = @ptrCast(*const clang.FunctionType, ty) }; + return ClangFunctionType{ .NoProto = @as(*const clang.FunctionType, @ptrCast(ty)) }; } return null; } @@ -4141,9 +4141,9 @@ fn transFloatingLiteral(c: *Context, expr: *const clang.FloatingLiteral, used: R fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node { // GNU extension of the ternary operator where the middle expression is // omitted, the condition itself is returned if it evaluates to true - const qt = @ptrCast(*const clang.Expr, stmt).getType(); + const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType(); const res_is_bool = qualTypeIsBoolean(qt); - const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt); + const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt)); const cond_expr = casted_stmt.getCond(); const false_expr 
= casted_stmt.getFalseExpr(); @@ -4203,9 +4203,9 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi }; defer cond_scope.deinit(); - const qt = @ptrCast(*const clang.Expr, stmt).getType(); + const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType(); const res_is_bool = qualTypeIsBoolean(qt); - const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt); + const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt)); const cond_expr = casted_stmt.getCond(); const true_expr = casted_stmt.getTrueExpr(); const false_expr = casted_stmt.getFalseExpr(); @@ -4246,7 +4246,7 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void { fn transQualTypeInitializedStringLiteral(c: *Context, elem_ty: Node, string_lit: *const clang.StringLiteral) TypeError!Node { const string_lit_size = string_lit.getLength(); - const array_size = @intCast(usize, string_lit_size); + const array_size = @as(usize, @intCast(string_lit_size)); // incomplete array initialized with empty string, will be translated as [1]T{0} // see https://github.com/ziglang/zig/issues/8256 @@ -4266,16 +4266,16 @@ fn transQualTypeInitialized( ) TypeError!Node { const ty = qt.getTypePtr(); if (ty.getTypeClass() == .IncompleteArray) { - const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty); + const incomplete_array_ty = @as(*const clang.IncompleteArrayType, @ptrCast(ty)); const elem_ty = try transType(c, scope, incomplete_array_ty.getElementType().getTypePtr(), source_loc); switch (decl_init.getStmtClass()) { .StringLiteralClass => { - const string_lit = @ptrCast(*const clang.StringLiteral, decl_init); + const string_lit = @as(*const clang.StringLiteral, @ptrCast(decl_init)); return transQualTypeInitializedStringLiteral(c, elem_ty, string_lit); }, .InitListExprClass => { - const init_expr = @ptrCast(*const clang.InitListExpr, decl_init); + const init_expr = @as(*const clang.InitListExpr, @ptrCast(decl_init)); 
const size = init_expr.getNumInits(); if (init_expr.isStringLiteralInit()) { @@ -4306,7 +4306,7 @@ fn transQualTypeIntWidthOf(c: *Context, ty: clang.QualType, is_signed: bool) Typ /// Asserts the type is an integer. fn transTypeIntWidthOf(c: *Context, ty: *const clang.Type, is_signed: bool) TypeError!Node { assert(ty.getTypeClass() == .Builtin); - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return Tag.type.create(c.arena, switch (builtin_ty.getKind()) { .Char_U, .Char_S, .UChar, .SChar, .Char8 => if (is_signed) "i8" else "u8", .UShort, .Short => if (is_signed) "c_short" else "c_ushort", @@ -4324,7 +4324,7 @@ fn isCBuiltinType(qt: clang.QualType, kind: clang.BuiltinTypeKind) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return builtin_ty.getKind() == kind; } @@ -4341,7 +4341,7 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); switch (builtin_ty.getKind()) { .Char_U, @@ -4358,9 +4358,9 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 { unreachable; }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); - const type_name = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin()); + const type_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin()); if (mem.eql(u8, type_name, "uint8_t") or mem.eql(u8, type_name, "int8_t")) { return 8; @@ -4396,12 +4396,12 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) 
clang.QualType { blk: { // If this is a C `char *`, turn it into a `const char *` if (expr.getStmtClass() != .ImplicitCastExprClass) break :blk; - const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr); + const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr)); if (cast_expr.getCastKind() != .ArrayToPointerDecay) break :blk; const sub_expr = cast_expr.getSubExpr(); if (sub_expr.getStmtClass() != .StringLiteralClass) break :blk; const array_qt = sub_expr.getType(); - const array_type = @ptrCast(*const clang.ArrayType, array_qt.getTypePtr()); + const array_type = @as(*const clang.ArrayType, @ptrCast(array_qt.getTypePtr())); var pointee_qt = array_type.getElementType(); pointee_qt.addConst(); return c.clang_context.getPointerType(pointee_qt); @@ -4412,11 +4412,11 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType { fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) bool { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return builtin_ty.getKind() == .Void; }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); const record_def = record_decl.getDefinition() orelse return true; @@ -4432,12 +4432,12 @@ fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) b return false; }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); const qt = elaborated_ty.getNamedType(); return typeIsOpaque(c, qt.getTypePtr(), loc); }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = 
typedef_decl.getUnderlyingType(); return typeIsOpaque(c, underlying_type.getTypePtr(), loc); @@ -4459,7 +4459,7 @@ fn qualTypeIsCharStar(qt: clang.QualType) bool { fn cIsUnqualifiedChar(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Char_S, .Char_U => true, else => false, @@ -4473,7 +4473,7 @@ fn cIsInteger(qt: clang.QualType) bool { fn cIsUnsignedInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Char_U, .UChar, @@ -4492,7 +4492,7 @@ fn cIsUnsignedInteger(qt: clang.QualType) bool { fn cIntTypeToIndex(qt: clang.QualType) u8 { const c_type = qualTypeCanon(qt); assert(c_type.getTypeClass() == .Builtin); - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Bool, .Char_U, .Char_S, .UChar, .SChar, .Char8 => 1, .WChar_U, .WChar_S => 2, @@ -4513,9 +4513,9 @@ fn cIntTypeCmp(a: clang.QualType, b: clang.QualType) math.Order { /// Checks if expr is an integer literal >= 0 fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool { - if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) { + if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) { var signum: c_int = undefined; - if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) { + if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) { return false; } return signum >= 0; @@ -4526,7 
+4526,7 @@ fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool { fn cIsSignedInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .SChar, .Short, @@ -4543,14 +4543,14 @@ fn cIsSignedInteger(qt: clang.QualType) bool { fn cIsNativeInt(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return builtin_ty.getKind() == .Int; } fn cIsFloating(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .Float, .Double, @@ -4564,7 +4564,7 @@ fn cIsFloating(qt: clang.QualType) bool { fn cIsLongLongInteger(qt: clang.QualType) bool { const c_type = qualTypeCanon(qt); if (c_type.getTypeClass() != .Builtin) return false; - const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type)); return switch (builtin_ty.getKind()) { .LongLong, .ULongLong, .Int128, .UInt128 => true, else => false, @@ -4681,8 +4681,8 @@ fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node { limb_i += 2; data_i += 1; }) { - limbs[limb_i] = @truncate(u32, data[data_i]); - limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32); + limbs[limb_i] = @as(u32, @truncate(data[data_i])); + limbs[limb_i + 1] = @as(u32, @truncate(data[data_i] >> 32)); } }, else => @compileError("unimplemented"), @@ -4772,7 +4772,7 @@ fn transCreateNodeShiftOp( fn transType(c: 
*Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node { switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return Tag.type.create(c.arena, switch (builtin_ty.getKind()) { .Void => "anyopaque", .Bool => "bool", @@ -4797,17 +4797,17 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan }); }, .FunctionProto => { - const fn_proto_ty = @ptrCast(*const clang.FunctionProtoType, ty); + const fn_proto_ty = @as(*const clang.FunctionProtoType, @ptrCast(ty)); const fn_proto = try transFnProto(c, null, fn_proto_ty, source_loc, null, false); return Node.initPayload(&fn_proto.base); }, .FunctionNoProto => { - const fn_no_proto_ty = @ptrCast(*const clang.FunctionType, ty); + const fn_no_proto_ty = @as(*const clang.FunctionType, @ptrCast(ty)); const fn_proto = try transFnNoProto(c, fn_no_proto_ty, source_loc, null, false); return Node.initPayload(&fn_proto.base); }, .Paren => { - const paren_ty = @ptrCast(*const clang.ParenType, ty); + const paren_ty = @as(*const clang.ParenType, @ptrCast(ty)); return transQualType(c, scope, paren_ty.getInnerType(), source_loc); }, .Pointer => { @@ -4832,7 +4832,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.c_pointer.create(c.arena, ptr_info); }, .ConstantArray => { - const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty); + const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(ty)); const size_ap_int = const_arr_ty.getSize(); const size = size_ap_int.getLimitedValue(usize); @@ -4841,7 +4841,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type }); }, .IncompleteArray => { - const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty); + const incomplete_array_ty = 
@as(*const clang.IncompleteArrayType, @ptrCast(ty)); const child_qt = incomplete_array_ty.getElementType(); const is_const = child_qt.isConstQualified(); @@ -4851,11 +4851,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type }); }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); var trans_scope = scope; - if (@ptrCast(*const clang.Decl, typedef_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(typedef_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; if (builtin_typedef_map.get(decl_name)) |builtin| return Tag.type.create(c.arena, builtin); @@ -4865,11 +4865,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); var trans_scope = scope; - if (@ptrCast(*const clang.Decl, record_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(record_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; } @@ -4878,11 +4878,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); var trans_scope = scope; - if 
(@ptrCast(*const clang.Decl, enum_decl).castToNamedDecl()) |named_decl| { + if (@as(*const clang.Decl, @ptrCast(enum_decl)).castToNamedDecl()) |named_decl| { const decl_name = try c.str(named_decl.getName_bytes_begin()); if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base; } @@ -4891,27 +4891,27 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.identifier.create(c.arena, name); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return transQualType(c, scope, elaborated_ty.getNamedType(), source_loc); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return transQualType(c, scope, decayed_ty.getDecayedType(), source_loc); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return transQualType(c, scope, attributed_ty.getEquivalentType(), source_loc); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return transQualType(c, scope, macroqualified_ty.getModifiedType(), source_loc); }, .TypeOf => { - const typeof_ty = @ptrCast(*const clang.TypeOfType, ty); + const typeof_ty = @as(*const clang.TypeOfType, @ptrCast(ty)); return transQualType(c, scope, typeof_ty.getUnmodifiedType(), source_loc); }, .TypeOfExpr => { - const typeofexpr_ty = @ptrCast(*const clang.TypeOfExprType, ty); + const typeofexpr_ty = @as(*const clang.TypeOfExprType, @ptrCast(ty)); const underlying_expr = transExpr(c, scope, typeofexpr_ty.getUnderlyingExpr(), .used) catch |err| switch (err) { error.UnsupportedTranslation => { return fail(c, error.UnsupportedType, source_loc, "unsupported underlying 
expression for TypeOfExpr", .{}); @@ -4921,7 +4921,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan return Tag.typeof.create(c.arena, underlying_expr); }, .Vector => { - const vector_ty = @ptrCast(*const clang.VectorType, ty); + const vector_ty = @as(*const clang.VectorType, @ptrCast(ty)); const num_elements = vector_ty.getNumElements(); const element_qt = vector_ty.getElementType(); return Tag.vector.create(c.arena, .{ @@ -4944,14 +4944,14 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool { const ty = qt.getTypePtr(); switch (qt.getTypeClass()) { .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); const underlying_type = typedef_decl.getUnderlyingType(); return qualTypeWasDemotedToOpaque(c, underlying_type); }, .Record => { - const record_ty = @ptrCast(*const clang.RecordType, ty); + const record_ty = @as(*const clang.RecordType, @ptrCast(ty)); const record_decl = record_ty.getDecl(); const canonical = @intFromPtr(record_decl.getCanonicalDecl()); @@ -4967,26 +4967,26 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool { return false; }, .Enum => { - const enum_ty = @ptrCast(*const clang.EnumType, ty); + const enum_ty = @as(*const clang.EnumType, @ptrCast(ty)); const enum_decl = enum_ty.getDecl(); const canonical = @intFromPtr(enum_decl.getCanonicalDecl()); return c.opaque_demotes.contains(canonical); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, elaborated_ty.getNamedType()); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, decayed_ty.getDecayedType()); }, .Attributed => { - const 
attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, attributed_ty.getEquivalentType()); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return qualTypeWasDemotedToOpaque(c, macroqualified_ty.getModifiedType()); }, else => return false, @@ -4997,28 +4997,28 @@ fn isAnyopaque(qt: clang.QualType) bool { const ty = qt.getTypePtr(); switch (ty.getTypeClass()) { .Builtin => { - const builtin_ty = @ptrCast(*const clang.BuiltinType, ty); + const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty)); return builtin_ty.getKind() == .Void; }, .Typedef => { - const typedef_ty = @ptrCast(*const clang.TypedefType, ty); + const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty)); const typedef_decl = typedef_ty.getDecl(); return isAnyopaque(typedef_decl.getUnderlyingType()); }, .Elaborated => { - const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty); + const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty)); return isAnyopaque(elaborated_ty.getNamedType().getCanonicalType()); }, .Decayed => { - const decayed_ty = @ptrCast(*const clang.DecayedType, ty); + const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty)); return isAnyopaque(decayed_ty.getDecayedType().getCanonicalType()); }, .Attributed => { - const attributed_ty = @ptrCast(*const clang.AttributedType, ty); + const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty)); return isAnyopaque(attributed_ty.getEquivalentType().getCanonicalType()); }, .MacroQualified => { - const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty); + const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty)); return isAnyopaque(macroqualified_ty.getModifiedType().getCanonicalType()); }, else => return false, @@ -5066,7 +5066,7 
@@ fn transFnProto( fn_decl_context: ?FnDeclContext, is_pub: bool, ) !*ast.Payload.Func { - const fn_ty = @ptrCast(*const clang.FunctionType, fn_proto_ty); + const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_proto_ty)); const cc = try transCC(c, fn_ty, source_loc); const is_var_args = fn_proto_ty.isVariadic(); return finishTransFnProto(c, fn_decl, fn_proto_ty, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub); @@ -5108,14 +5108,14 @@ fn finishTransFnProto( var i: usize = 0; while (i < param_count) : (i += 1) { - const param_qt = fn_proto_ty.?.getParamType(@intCast(c_uint, i)); + const param_qt = fn_proto_ty.?.getParamType(@as(c_uint, @intCast(i))); const is_noalias = param_qt.isRestrictQualified(); const param_name: ?[]const u8 = if (fn_decl) |decl| blk: { - const param = decl.getParamDecl(@intCast(c_uint, i)); - const param_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, param).getName_bytes_begin()); + const param = decl.getParamDecl(@as(c_uint, @intCast(i))); + const param_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(param)).getName_bytes_begin()); if (param_name.len < 1) break :blk null; @@ -5576,7 +5576,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void { tok_list.items.len = 0; switch (entity.getKind()) { .MacroDefinitionKind => { - const macro = @ptrCast(*clang.MacroDefinitionRecord, entity); + const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity)); const raw_name = macro.getName_getNameStart(); const begin_loc = macro.getSourceRange_getBegin(); @@ -6046,7 +6046,7 @@ fn escapeUnprintables(ctx: *Context, m: *MacroCtx) ![]const u8 { if (std.unicode.utf8ValidateSlice(zigified)) return zigified; const formatter = std.fmt.fmtSliceEscapeLower(zigified); - const encoded_size = @intCast(usize, std.fmt.count("{s}", .{formatter})); + const encoded_size = @as(usize, @intCast(std.fmt.count("{s}", .{formatter}))); var output = try ctx.arena.alloc(u8, encoded_size); return 
std.fmt.bufPrint(output, "{s}", .{formatter}) catch |err| switch (err) { error.NoSpaceLeft => unreachable, diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index a24bff017628..50a7a79f8786 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -393,7 +393,7 @@ pub const Node = extern union { pub fn tag(self: Node) Tag { if (self.tag_if_small_enough < Tag.no_payload_count) { - return @enumFromInt(Tag, @intCast(std.meta.Tag(Tag), self.tag_if_small_enough)); + return @as(Tag, @enumFromInt(@as(std.meta.Tag(Tag), @intCast(self.tag_if_small_enough)))); } else { return self.ptr_otherwise.tag; } @@ -778,7 +778,7 @@ pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast { try ctx.tokens.append(gpa, .{ .tag = .eof, - .start = @intCast(u32, ctx.buf.items.len), + .start = @as(u32, @intCast(ctx.buf.items.len)), }); return std.zig.Ast{ @@ -808,10 +808,10 @@ const Context = struct { try c.tokens.append(c.gpa, .{ .tag = tag, - .start = @intCast(u32, start_index), + .start = @as(u32, @intCast(start_index)), }); - return @intCast(u32, c.tokens.len - 1); + return @as(u32, @intCast(c.tokens.len - 1)); } fn addToken(c: *Context, tag: TokenTag, bytes: []const u8) Allocator.Error!TokenIndex { @@ -827,13 +827,13 @@ const Context = struct { fn listToSpan(c: *Context, list: []const NodeIndex) Allocator.Error!NodeSubRange { try c.extra_data.appendSlice(c.gpa, list); return NodeSubRange{ - .start = @intCast(NodeIndex, c.extra_data.items.len - list.len), - .end = @intCast(NodeIndex, c.extra_data.items.len), + .start = @as(NodeIndex, @intCast(c.extra_data.items.len - list.len)), + .end = @as(NodeIndex, @intCast(c.extra_data.items.len)), }; } fn addNode(c: *Context, elem: std.zig.Ast.Node) Allocator.Error!NodeIndex { - const result = @intCast(NodeIndex, c.nodes.len); + const result = @as(NodeIndex, @intCast(c.nodes.len)); try c.nodes.append(c.gpa, elem); return result; } @@ -841,7 +841,7 @@ const Context = struct { fn addExtra(c: *Context, extra: anytype) 
Allocator.Error!NodeIndex { const fields = std.meta.fields(@TypeOf(extra)); try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len); - const result = @intCast(u32, c.extra_data.items.len); + const result = @as(u32, @intCast(c.extra_data.items.len)); inline for (fields) |field| { comptime std.debug.assert(field.type == NodeIndex); c.extra_data.appendAssumeCapacity(@field(extra, field.name)); diff --git a/src/type.zig b/src/type.zig index 280c29231426..e4ae2d2c3539 100644 --- a/src/type.zig +++ b/src/type.zig @@ -807,7 +807,7 @@ pub const Type = struct { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| { if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| { - return @intCast(u32, a); + return @as(u32, @intCast(a)); } else if (opt_sema) |sema| { const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; @@ -886,7 +886,7 @@ pub const Type = struct { }, .vector_type => |vector_type| { const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); - const bits = @intCast(u32, bits_u64); + const bits = @as(u32, @intCast(bits_u64)); const bytes = ((bits * vector_type.len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); return AbiAlignmentAdvanced{ .scalar = alignment }; @@ -901,7 +901,7 @@ pub const Type = struct { // represents machine code; not a pointer .func_type => |func_type| return AbiAlignmentAdvanced{ .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| - @intCast(u32, a) + @as(u32, @intCast(a)) else target_util.defaultFunctionAlignment(target), }, @@ -1015,7 +1015,7 @@ pub const Type = struct { else => |e| return e, })) continue; - const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse + const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { @@ -1026,7 +1026,7 @@ pub const Type = 
struct { .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, - }); + })); big_align = @max(big_align, field_align); // This logic is duplicated in Module.Struct.Field.alignment. @@ -1221,7 +1221,7 @@ pub const Type = struct { else => |e| return e, })) continue; - const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse + const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { @@ -1232,7 +1232,7 @@ pub const Type = struct { .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, - }); + })); max_align = @max(max_align, field_align); } return AbiAlignmentAdvanced{ .scalar = max_align }; @@ -1307,7 +1307,7 @@ pub const Type = struct { } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); - const elem_bits = @intCast(u32, elem_bits_u64); + const elem_bits = @as(u32, @intCast(elem_bits_u64)); const total_bits = elem_bits * vector_type.len; const total_bytes = (total_bits + 7) / 8; const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { @@ -1573,12 +1573,12 @@ pub const Type = struct { fn intAbiSize(bits: u16, target: Target) u64 { const alignment = intAbiAlignment(bits, target); - return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); + return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment); } fn intAbiAlignment(bits: u16, target: Target) u32 { return @min( - std.math.ceilPowerOfTwoPromote(u16, @intCast(u16, (@as(u17, bits) + 7) / 8)), + std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), target.maxIntAlignment(), ); } @@ -2166,7 +2166,7 @@ pub const Type = struct { pub fn vectorLen(ty: Type, mod: *const Module) u32 { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, - 
.anon_struct_type => |tuple| @intCast(u32, tuple.types.len), + .anon_struct_type => |tuple| @as(u32, @intCast(tuple.types.len)), else => unreachable, }; } @@ -3124,7 +3124,7 @@ pub const Type = struct { for (struct_obj.fields.values(), 0..) |f, i| { if (!f.ty.hasRuntimeBits(mod)) continue; - const field_bits = @intCast(u16, f.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(f.ty.bitSize(mod))); if (i == field_index) { bit_offset = running_bits; elem_size_bits = field_bits; @@ -3385,8 +3385,8 @@ pub const Type = struct { pub fn smallestUnsignedBits(max: u64) u16 { if (max == 0) return 0; const base = std.math.log2(max); - const upper = (@as(u64, 1) << @intCast(u6, base)) - 1; - return @intCast(u16, base + @intFromBool(upper < max)); + const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; + return @as(u16, @intCast(base + @intFromBool(upper < max))); } /// This is only used for comptime asserts. Bump this number when you make a change diff --git a/src/value.zig b/src/value.zig index 542dfb73ec26..1c22717152cc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -112,7 +112,7 @@ pub const Value = struct { return self.castTag(T.base_tag); } inline for (@typeInfo(Tag).Enum.fields) |field| { - const t = @enumFromInt(Tag, field.value); + const t = @as(Tag, @enumFromInt(field.value)); if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); @@ -203,8 +203,8 @@ pub const Value = struct { .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const len = @intCast(usize, ty.arrayLen(mod)); + const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); try ip.string_bytes.appendNTimes(mod.gpa, byte, len); return ip.getOrPutTrailingString(mod.gpa, len); }, @@ -226,8 +226,8 @@ pub const 
Value = struct { .bytes => |bytes| try allocator.dupe(u8, bytes), .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod))); + const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod)))); @memset(result, byte); return result; }, @@ -237,10 +237,10 @@ pub const Value = struct { } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { - const result = try allocator.alloc(u8, @intCast(usize, len)); + const result = try allocator.alloc(u8, @as(usize, @intCast(len))); for (result, 0..) |*elem, i| { const elem_val = try val.elemValue(mod, i); - elem.* = @intCast(u8, elem_val.toUnsignedInt(mod)); + elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); } return result; } @@ -248,7 +248,7 @@ pub const Value = struct { fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { const gpa = mod.gpa; const ip = &mod.intern_pool; - const len = @intCast(usize, len_u64); + const len = @as(usize, @intCast(len_u64)); try ip.string_bytes.ensureUnusedCapacity(gpa, len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. 
Let's @@ -256,7 +256,7 @@ pub const Value = struct { const prev = ip.string_bytes.items.len; const elem_val = try val.elemValue(mod, i); assert(ip.string_bytes.items.len == prev); - const byte = @intCast(u8, elem_val.toUnsignedInt(mod)); + const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); ip.string_bytes.appendAssumeCapacity(byte); } return ip.getOrPutTrailingString(gpa, len); @@ -303,7 +303,7 @@ pub const Value = struct { } }); }, .aggregate => { - const len = @intCast(usize, ty.arrayLen(mod)); + const len = @as(usize, @intCast(ty.arrayLen(mod))); const old_elems = val.castTag(.aggregate).?.data[0..len]; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); @@ -534,7 +534,7 @@ pub const Value = struct { const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); - return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod); + return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod); }, else => null, }, @@ -561,9 +561,9 @@ pub const Value = struct { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, - .u64 => |x| @intCast(i64, x), - .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), + .u64 => |x| @as(i64, @intCast(x)), + .lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))), }, else => unreachable, }, @@ -604,7 +604,7 @@ pub const Value = struct { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const size = @intCast(usize, ty.abiSize(mod)); + const size = @as(usize, @intCast(ty.abiSize(mod))); 
@memset(buffer[0..size], 0xaa); return; } @@ -623,17 +623,17 @@ pub const Value = struct { bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), - 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian), - 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian), - 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian), - 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian), + 16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian), else => unreachable, }, .Array => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); - const elem_size = @intCast(usize, elem_ty.abiSize(mod)); + const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod))); var elem_i: usize = 0; var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { @@ -645,13 +645,13 @@ pub const Value = struct { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. 
- const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, .Struct => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => for (ty.structFields(mod).values(), 0..) |field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, mod)); + const off = @as(usize, @intCast(ty.structFieldOffset(i, mod))); const field_val = switch (val.ip_index) { .none => switch (val.tag()) { .bytes => { @@ -674,7 +674,7 @@ pub const Value = struct { try writeToMemory(field_val, field.ty, mod, buffer[off..]); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -686,14 +686,14 @@ pub const Value = struct { .error_union => |error_union| error_union.val.err_name, else => unreachable, }; - const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); - std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); + const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); + std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @as(Int, @intCast(int)), endian); }, .Union => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => return error.Unimplemented, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -730,7 +730,7 @@ pub const Value = struct { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const bit_size = @intCast(usize, ty.bitSize(mod)); + const bit_size = @as(usize, @intCast(ty.bitSize(mod))); 
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); return; } @@ -742,9 +742,9 @@ pub const Value = struct { .Big => buffer.len - bit_offset / 8 - 1, }; if (val.toBool()) { - buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8)); + buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); } else { - buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8)); + buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); } }, .Int, .Enum => { @@ -759,17 +759,17 @@ pub const Value = struct { } }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian), - 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian), - 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian), - 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian), - 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian), + 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian), else => unreachable, }, .Vector => { const elem_ty = ty.childType(mod); - const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); - const len = @intCast(usize, ty.arrayLen(mod)); + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); var bits: u16 = 
0; var elem_i: usize = 0; @@ -789,7 +789,7 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage; for (fields, 0..) |field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(field.ty.bitSize(mod))); const field_val = switch (storage) { .bytes => unreachable, .elems => |elems| elems[i], @@ -865,12 +865,12 @@ pub const Value = struct { if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); - const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, .unsigned => { const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); - const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, } else { // Slow path, we have to construct a big-int @@ -886,22 +886,22 @@ pub const Value = struct { .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, - 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, - 64 => .{ .f64 = @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian)) }, - 80 => .{ .f80 = @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian)) }, - 128 => .{ .f128 = @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian)) }, + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) }, + 64 => .{ .f64 = 
@as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) }, else => unreachable, }, } })).toValue(), .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); var offset: usize = 0; for (elems) |*elem| { elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); - offset += @intCast(usize, elem_size); + offset += @as(usize, @intCast(elem_size)); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -911,7 +911,7 @@ pub const Value = struct { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, .Struct => switch (ty.containerLayout(mod)) { @@ -920,8 +920,8 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(InternPool.Index, fields.len); for (field_vals, fields, 0..) 
|*field_val, field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, field.ty.abiSize(mod)); + const off = @as(usize, @intCast(ty.structFieldOffset(i, mod))); + const sz = @as(usize, @intCast(field.ty.abiSize(mod))); field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod); } return (try mod.intern(.{ .aggregate = .{ @@ -930,7 +930,7 @@ pub const Value = struct { } })).toValue(); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, }, @@ -938,7 +938,7 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - const name = mod.global_error_set.keys()[@intCast(usize, int)]; + const name = mod.global_error_set.keys()[@as(usize, @intCast(int))]; return (try mod.intern(.{ .err = .{ .ty = ty.toIntern(), .name = name, @@ -977,7 +977,7 @@ pub const Value = struct { .Big => buffer[buffer.len - bit_offset / 8 - 1], .Little => buffer[bit_offset / 8], }; - if (((byte >> @intCast(u3, bit_offset % 8)) & 1) == 0) { + if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) { return Value.false; } else { return Value.true; @@ -1009,7 +1009,7 @@ pub const Value = struct { } // Slow path, we have to construct a big-int - const abi_size = @intCast(usize, ty.abiSize(mod)); + const abi_size = @as(usize, @intCast(ty.abiSize(mod))); const Limb = std.math.big.Limb; const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); const limbs_buffer = try arena.alloc(Limb, limb_count); @@ -1021,20 +1021,20 @@ pub const Value = struct { .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @bitCast(f16, 
std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, - 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, - 64 => .{ .f64 = @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian)) }, - 80 => .{ .f80 = @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian)) }, - 128 => .{ .f128 = @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian)) }, + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) }, + 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) }, else => unreachable, }, } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); - const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); var bits: u16 = 0; - const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; @@ -1054,7 +1054,7 @@ pub const Value = struct { const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(InternPool.Index, fields.len); for (fields, 0..) 
|field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(mod)); + const field_bits = @as(u16, @intCast(field.ty.bitSize(mod))); field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod); bits += field_bits; } @@ -1081,18 +1081,18 @@ pub const Value = struct { pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), + .big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))), inline .u64, .i64 => |x| { if (T == f80) { @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); } - return @floatFromInt(T, x); + return @as(T, @floatFromInt(x)); }, - .lazy_align => |ty| @floatFromInt(T, ty.toType().abiAlignment(mod)), - .lazy_size => |ty| @floatFromInt(T, ty.toType().abiSize(mod)), + .lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))), + .lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))), }, .float => |float| switch (float.storage) { - inline else => |x| @floatCast(T, x), + inline else => |x| @as(T, @floatCast(x)), }, else => unreachable, }; @@ -1107,7 +1107,7 @@ pub const Value = struct { var i: usize = limbs.len; while (i != 0) { i -= 1; - const limb: f128 = @floatFromInt(f128, limbs[i]); + const limb: f128 = @as(f128, @floatFromInt(limbs[i])); result = @mulAdd(f128, base, result, limb); } if (positive) { @@ -1132,7 +1132,7 @@ pub const Value = struct { pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { var bigint_buf: BigIntSpace = undefined; const bigint = val.toBigInt(&bigint_buf, mod); - return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits)); + return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits))); } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { @@ -1505,10 +1505,10 @@ pub 
const Value = struct { .int, .eu_payload => unreachable, .opt_payload => |base| base.toValue().elemValue(mod, index), .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), - .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)), + .elem => |elem| elem.base.toValue().elemValue(mod, index + @as(usize, @intCast(elem.index))), .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { const base_decl = mod.declPtr(decl_index); - const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index)); + const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index))); return field_val.elemValue(mod, index); } else unreachable, }, @@ -1604,18 +1604,18 @@ pub const Value = struct { .comptime_field => |comptime_field| comptime_field.toValue() .sliceArray(mod, arena, start, end), .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + @intCast(usize, elem.index), end + @intCast(usize, elem.index)), + .sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { .array_type => |array_type| try mod.arrayType(.{ - .len = @intCast(u32, end - start), + .len = @as(u32, @intCast(end - start)), .child = array_type.child, .sentinel = if (end == array_type.len) array_type.sentinel else .none, }), .vector_type => |vector_type| try mod.vectorType(.{ - .len = @intCast(u32, end - start), + .len = @as(u32, @intCast(end - start)), .child = vector_type.child, }), else => unreachable, @@ -1734,7 +1734,7 @@ pub const Value = struct { .simple_value => |v| v == .undefined, .ptr => |ptr| switch (ptr.len) { .none => false, - else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| { + else => for (0..@as(usize, 
@intCast(ptr.len.toValue().toUnsignedInt(mod)))) |index| { if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; } else false, }, @@ -1783,7 +1783,7 @@ pub const Value = struct { pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { return if (getErrorName(val, mod).unwrap()) |err_name| - @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err_name).?) + @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?)) else 0; } @@ -1868,11 +1868,11 @@ pub const Value = struct { fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { - 16 => .{ .f16 = @floatFromInt(f16, x) }, - 32 => .{ .f32 = @floatFromInt(f32, x) }, - 64 => .{ .f64 = @floatFromInt(f64, x) }, - 80 => .{ .f80 = @floatFromInt(f80, x) }, - 128 => .{ .f128 = @floatFromInt(f128, x) }, + 16 => .{ .f16 = @as(f16, @floatFromInt(x)) }, + 32 => .{ .f32 = @as(f32, @floatFromInt(x)) }, + 64 => .{ .f64 = @as(f64, @floatFromInt(x)) }, + 80 => .{ .f80 = @as(f80, @floatFromInt(x)) }, + 128 => .{ .f128 = @as(f128, @floatFromInt(x)) }, else => unreachable, }; return (try mod.intern(.{ .float = .{ @@ -1887,7 +1887,7 @@ pub const Value = struct { } const w_value = @fabs(scalar); - return @divFloor(@intFromFloat(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1; + return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1; } pub const OverflowArithmeticResult = struct { @@ -2738,14 +2738,14 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); const bits_elem = try bits.elemValue(mod, i); - scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(); } - return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); + return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod); } pub fn intTruncScalar( @@ -2793,7 +2793,7 @@ pub const Value = struct { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2855,7 +2855,7 @@ pub const Value = struct { const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2912,7 +2912,7 @@ pub const Value = struct { var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -2984,7 
+2984,7 @@ pub const Value = struct { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @intCast(usize, rhs.toUnsignedInt(mod)); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { diff --git a/test/behavior/align.zig b/test/behavior/align.zig index d3e4d8125014..c8eb71a4333c 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -24,7 +24,7 @@ test "slicing array of length 1 can not assume runtime index is always zero" { const slice = @as(*align(4) [1]u8, &foo)[runtime_index..]; try expect(@TypeOf(slice) == []u8); try expect(slice.len == 0); - try expect(@truncate(u2, @intFromPtr(slice.ptr) - 1) == 0); + try expect(@as(u2, @truncate(@intFromPtr(slice.ptr) - 1)) == 0); } test "default alignment allows unspecified in type syntax" { @@ -47,7 +47,7 @@ test "@alignCast pointers" { try expect(x == 2); } fn expectsOnly1(x: *align(1) u32) void { - expects4(@alignCast(4, x)); + expects4(@alignCast(x)); } fn expects4(x: *align(4) u32) void { x.* += 1; @@ -213,12 +213,6 @@ test "alignment and size of structs with 128-bit fields" { } } -test "@ptrCast preserves alignment of bigger source" { - var x: u32 align(16) = 1234; - const ptr = @ptrCast(*u8, &x); - try expect(@TypeOf(ptr) == *align(16) u8); -} - test "alignstack" { try expect(fnWithAlignedStack() == 1234); } @@ -249,7 +243,7 @@ test "specifying alignment allows pointer cast" { } fn testBytesAlign(b: u8) !void { var bytes align(4) = [_]u8{ b, b, b, b }; - const ptr = @ptrCast(*u32, &bytes[0]); + const ptr = @as(*u32, @ptrCast(&bytes[0])); try expect(ptr.* == 0x33333333); } @@ -265,7 +259,7 @@ test "@alignCast slices" { try expect(slice[0] == 2); } fn sliceExpectsOnly1(slice: []align(1) u32) void { - sliceExpects4(@alignCast(4, slice)); + sliceExpects4(@alignCast(slice)); } fn 
sliceExpects4(slice: []align(4) u32) void { slice[0] += 1; @@ -302,8 +296,8 @@ test "page aligned array on stack" { try expect(@intFromPtr(&array[0]) & 0xFFF == 0); try expect(array[3] == 4); - try expect(@truncate(u4, @intFromPtr(&number1)) == 0); - try expect(@truncate(u4, @intFromPtr(&number2)) == 0); + try expect(@as(u4, @truncate(@intFromPtr(&number1))) == 0); + try expect(@as(u4, @truncate(@intFromPtr(&number2))) == 0); try expect(number1 == 42); try expect(number2 == 43); } @@ -366,7 +360,7 @@ test "@alignCast functions" { try expect(fnExpectsOnly1(simple4) == 0x19); } fn fnExpectsOnly1(ptr: *const fn () align(1) i32) i32 { - return fnExpects4(@alignCast(4, ptr)); + return fnExpects4(@alignCast(ptr)); } fn fnExpects4(ptr: *const fn () align(4) i32) i32 { return ptr(); @@ -461,9 +455,11 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void { test "alignment of function with c calling convention" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const a = @alignOf(@TypeOf(nothing)); + var runtime_nothing = ¬hing; - const casted1 = @ptrCast(*const u8, runtime_nothing); - const casted2 = @ptrCast(*const fn () callconv(.C) void, casted1); + const casted1: *align(a) const u8 = @ptrCast(runtime_nothing); + const casted2: *const fn () callconv(.C) void = @ptrCast(casted1); casted2(); } @@ -588,7 +584,7 @@ test "@alignCast null" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var ptr: ?*anyopaque = null; - const aligned: ?*anyopaque = @alignCast(@alignOf(?*anyopaque), ptr); + const aligned: ?*anyopaque = @alignCast(ptr); try expect(aligned == null); } diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 9ef4a55b398c..bc8176aa9cdf 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -170,7 +170,7 @@ test "array with sentinels" { { var zero_sized: [0:0xde]u8 = [_:0xde]u8{}; try expect(zero_sized[0] == 0xde); - var reinterpreted = @ptrCast(*[1]u8, &zero_sized); + var 
reinterpreted = @as(*[1]u8, @ptrCast(&zero_sized)); try expect(reinterpreted[0] == 0xde); } var arr: [3:0x55]u8 = undefined; @@ -694,7 +694,7 @@ test "array init of container level array variable" { test "runtime initialized sentinel-terminated array literal" { var c: u16 = 300; const f = &[_:0x9999]u16{c}; - const g = @ptrCast(*const [4]u8, f); + const g = @as(*const [4]u8, @ptrCast(f)); try std.testing.expect(g[2] == 0x99); try std.testing.expect(g[3] == 0x99); } diff --git a/test/behavior/async_fn.zig b/test/behavior/async_fn.zig index dcbe78b0910c..7eaa5c78d02d 100644 --- a/test/behavior/async_fn.zig +++ b/test/behavior/async_fn.zig @@ -136,12 +136,12 @@ test "@frameSize" { const S = struct { fn doTheTest() !void { { - var ptr = @ptrCast(fn (i32) callconv(.Async) void, other); + var ptr = @as(fn (i32) callconv(.Async) void, @ptrCast(other)); const size = @frameSize(ptr); try expect(size == @sizeOf(@Frame(other))); } { - var ptr = @ptrCast(fn () callconv(.Async) void, first); + var ptr = @as(fn () callconv(.Async) void, @ptrCast(first)); const size = @frameSize(ptr); try expect(size == @sizeOf(@Frame(first))); } @@ -1184,7 +1184,7 @@ test "using @TypeOf on a generic function call" { global_frame = @frame(); } const F = @TypeOf(async amain(x - 1)); - const frame = @ptrFromInt(*F, @intFromPtr(&buf)); + const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf))); return await @asyncCall(frame, {}, amain, .{x - 1}); } }; @@ -1212,7 +1212,7 @@ test "recursive call of await @asyncCall with struct return type" { global_frame = @frame(); } const F = @TypeOf(async amain(x - 1)); - const frame = @ptrFromInt(*F, @intFromPtr(&buf)); + const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf))); return await @asyncCall(frame, {}, amain, .{x - 1}); } @@ -1833,7 +1833,7 @@ test "avoid forcing frame alignment resolution implicit cast to *anyopaque" { } }; var frame = async S.foo(); - resume @ptrCast(anyframe->bool, @alignCast(@alignOf(@Frame(S.foo)), S.x)); + resume @as(anyframe->bool, 
@ptrCast(@alignCast(S.x))); try expect(nosuspend await frame); } diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 4394e62f6f14..5264ef75cf5e 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -326,7 +326,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void { const uint = std.meta.Int(.unsigned, 128); const int = std.meta.Int(signedness, 128); - const initial: int = @bitCast(int, @as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd)); + const initial: int = @as(int, @bitCast(@as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd))); const replacement: int = 0x00000000_00000005_00000000_00000003; var x: int align(16) = initial; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index f98cf8f23714..87cbb3e24224 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -20,7 +20,7 @@ test "truncate" { try comptime expect(testTruncate(0x10fd) == 0xfd); } fn testTruncate(x: u32) u8 { - return @truncate(u8, x); + return @as(u8, @truncate(x)); } test "truncate to non-power-of-two integers" { @@ -56,7 +56,7 @@ test "truncate to non-power-of-two integers from 128-bit" { } fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void { - try expect(@truncate(Little, big) == little); + try expect(@as(Little, @truncate(big)) == little); } const g1: i32 = 1233 + 1; @@ -229,9 +229,9 @@ test "opaque types" { const global_a: i32 = 1234; const global_b: *const i32 = &global_a; -const global_c: *const f32 = @ptrCast(*const f32, global_b); +const global_c: *const f32 = @as(*const f32, @ptrCast(global_b)); test "compile time global reinterpret" { - const d = @ptrCast(*const i32, global_c); + const d = @as(*const i32, @ptrCast(global_c)); try expect(d.* == 1234); } @@ -362,7 +362,7 @@ test "variable is allowed to be a pointer to an opaque type" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var x: i32 = 1234; - _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, 
&x)); + _ = hereIsAnOpaqueType(@as(*OpaqueA, @ptrCast(&x))); } fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { var a = ptr; @@ -442,7 +442,7 @@ test "array 3D const double ptr with offset" { } fn testArray2DConstDoublePtr(ptr: *const f32) !void { - const ptr2 = @ptrCast([*]const f32, ptr); + const ptr2 = @as([*]const f32, @ptrCast(ptr)); try expect(ptr2[0] == 1.0); try expect(ptr2[1] == 2.0); } @@ -574,9 +574,9 @@ test "constant equal function pointers" { fn emptyFn() void {} -const addr1 = @ptrCast(*const u8, &emptyFn); +const addr1 = @as(*const u8, @ptrCast(&emptyFn)); test "comptime cast fn to ptr" { - const addr2 = @ptrCast(*const u8, &emptyFn); + const addr2 = @as(*const u8, @ptrCast(&emptyFn)); try comptime expect(addr1 == addr2); } @@ -667,7 +667,7 @@ test "string escapes" { test "explicit cast optional pointers" { const a: ?*i32 = undefined; - const b: ?*f32 = @ptrCast(?*f32, a); + const b: ?*f32 = @as(?*f32, @ptrCast(a)); _ = b; } @@ -752,7 +752,7 @@ test "auto created variables have correct alignment" { const S = struct { fn foo(str: [*]const u8) u32 { - for (@ptrCast([*]align(1) const u32, str)[0..1]) |v| { + for (@as([*]align(1) const u32, @ptrCast(str))[0..1]) |v| { return v; } return 0; @@ -772,7 +772,7 @@ test "extern variable with non-pointer opaque type" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @export(var_to_export, .{ .name = "opaque_extern_var" }); - try expect(@ptrCast(*align(1) u32, &opaque_extern_var).* == 42); + try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42); } extern var opaque_extern_var: opaque {}; var var_to_export: u32 = 42; diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig index 03eb4433e13b..8b605385d2cf 100644 --- a/test/behavior/bit_shifting.zig +++ b/test/behavior/bit_shifting.zig @@ -28,7 +28,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt // TODO: https://github.com/ziglang/zig/issues/1544 // This cast could be 
implicit if we teach the compiler that // u32 >> 30 -> u2 - return @intCast(ShardKey, shard_key); + return @as(ShardKey, @intCast(shard_key)); } pub fn put(self: *Self, node: *Node) void { @@ -85,14 +85,14 @@ fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, c var table = Table.create(); var node_buffer: [node_count]Table.Node = undefined; for (&node_buffer, 0..) |*node, i| { - const key = @intCast(Key, i); + const key = @as(Key, @intCast(i)); try expect(table.get(key) == null); node.init(key, {}); table.put(node); } for (&node_buffer, 0..) |*node, i| { - try expect(table.get(@intCast(Key, i)) == node); + try expect(table.get(@as(Key, @intCast(i))) == node); } } diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index f71a05cada5e..0c137a2baadb 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -71,11 +71,11 @@ fn testBitCast(comptime N: usize) !void { } fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) { - return @bitCast(std.meta.Int(.unsigned, N), x); + return @as(std.meta.Int(.unsigned, N), @bitCast(x)); } fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signed, N) { - return @bitCast(std.meta.Int(.signed, N), x); + return @as(std.meta.Int(.signed, N), @bitCast(x)); } test "bitcast uX to bytes" { @@ -114,14 +114,14 @@ fn testBitCastuXToBytes(comptime N: usize) !void { while (byte_i < (byte_count - 1)) : (byte_i += 1) { try expect(bytes[byte_i] == 0xff); } - try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0); }, .Big => { var byte_i = byte_count - 1; while (byte_i > 0) : (byte_i -= 1) { try expect(bytes[byte_i] == 0xff); } - try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0); + try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0); }, } } @@ -130,12 +130,12 @@ fn testBitCastuXToBytes(comptime N: usize) !void { test "nested 
bitcast" { const S = struct { fn moo(x: isize) !void { - try expect(@intCast(isize, 42) == x); + try expect(@as(isize, @intCast(42)) == x); } fn foo(x: isize) !void { try @This().moo( - @bitCast(isize, if (x != 0) @bitCast(usize, x) else @bitCast(usize, x)), + @as(isize, @bitCast(if (x != 0) @as(usize, @bitCast(x)) else @as(usize, @bitCast(x)))), ); } }; @@ -146,7 +146,7 @@ test "nested bitcast" { // issue #3010: compiler segfault test "bitcast literal [4]u8 param to u32" { - const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 }); + const ip = @as(u32, @bitCast([_]u8{ 255, 255, 255, 255 })); try expect(ip == maxInt(u32)); } @@ -154,7 +154,7 @@ test "bitcast generates a temporary value" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var y = @as(u16, 0x55AA); - const x = @bitCast(u16, @bitCast([2]u8, y)); + const x = @as(u16, @bitCast(@as([2]u8, @bitCast(y)))); try expect(y == x); } @@ -175,7 +175,7 @@ test "@bitCast packed structs at runtime and comptime" { const S = struct { fn doTheTest() !void { var full = Full{ .number = 0x1234 }; - var two_halves = @bitCast(Divided, full); + var two_halves = @as(Divided, @bitCast(full)); try expect(two_halves.half1 == 0x34); try expect(two_halves.quarter3 == 0x2); try expect(two_halves.quarter4 == 0x1); @@ -200,7 +200,7 @@ test "@bitCast extern structs at runtime and comptime" { const S = struct { fn doTheTest() !void { var full = Full{ .number = 0x1234 }; - var two_halves = @bitCast(TwoHalves, full); + var two_halves = @as(TwoHalves, @bitCast(full)); switch (native_endian) { .Big => { try expect(two_halves.half1 == 0x12); @@ -230,8 +230,8 @@ test "bitcast packed struct to integer and back" { const S = struct { fn doTheTest() !void { var move = LevelUpMove{ .move_id = 1, .level = 2 }; - var v = @bitCast(u16, move); - var back_to_a_move = @bitCast(LevelUpMove, v); + var v = @as(u16, @bitCast(move)); + var back_to_a_move = @as(LevelUpMove, @bitCast(v)); try expect(back_to_a_move.move_id == 1); try 
expect(back_to_a_move.level == 2); } @@ -250,7 +250,7 @@ test "implicit cast to error union by returning" { try expect((func(-1) catch unreachable) == maxInt(u64)); } pub fn func(sz: i64) anyerror!u64 { - return @bitCast(u64, sz); + return @as(u64, @bitCast(sz)); } }; try S.entry(); @@ -261,7 +261,7 @@ test "bitcast packed struct literal to byte" { const Foo = packed struct { value: u8, }; - const casted = @bitCast(u8, Foo{ .value = 0xF }); + const casted = @as(u8, @bitCast(Foo{ .value = 0xF })); try expect(casted == 0xf); } @@ -269,7 +269,7 @@ test "comptime bitcast used in expression has the correct type" { const Foo = packed struct { value: u8, }; - try expect(@bitCast(u8, Foo{ .value = 0xF }) == 0xf); + try expect(@as(u8, @bitCast(Foo{ .value = 0xF })) == 0xf); } test "bitcast passed as tuple element" { @@ -279,7 +279,7 @@ test "bitcast passed as tuple element" { try expect(args[0] == 12.34); } }; - try S.foo(.{@bitCast(f32, @as(u32, 0x414570A4))}); + try S.foo(.{@as(f32, @bitCast(@as(u32, 0x414570A4)))}); } test "triple level result location with bitcast sandwich passed as tuple element" { @@ -289,7 +289,7 @@ test "triple level result location with bitcast sandwich passed as tuple element try expect(args[0] > 12.33 and args[0] < 12.35); } }; - try S.foo(.{@as(f64, @bitCast(f32, @as(u32, 0x414570A4)))}); + try S.foo(.{@as(f64, @as(f32, @bitCast(@as(u32, 0x414570A4))))}); } test "@bitCast packed struct of floats" { @@ -318,7 +318,7 @@ test "@bitCast packed struct of floats" { const S = struct { fn doTheTest() !void { var foo = Foo{}; - var v = @bitCast(Foo2, foo); + var v = @as(Foo2, @bitCast(foo)); try expect(v.a == foo.a); try expect(v.b == foo.b); try expect(v.c == foo.c); @@ -360,12 +360,12 @@ test "comptime @bitCast packed struct to int and back" { // S -> Int var s: S = .{}; - try expectEqual(@bitCast(Int, s), comptime @bitCast(Int, S{})); + try expectEqual(@as(Int, @bitCast(s)), comptime @as(Int, @bitCast(S{}))); // Int -> S var i: Int = 0; - const 
rt_cast = @bitCast(S, i); - const ct_cast = comptime @bitCast(S, @as(Int, 0)); + const rt_cast = @as(S, @bitCast(i)); + const ct_cast = comptime @as(S, @bitCast(@as(Int, 0))); inline for (@typeInfo(S).Struct.fields) |field| { try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name)); } @@ -381,10 +381,10 @@ test "comptime bitcast with fields following f80" { const FloatT = extern struct { f: f80, x: u128 align(16) }; const x: FloatT = .{ .f = 0.5, .x = 123 }; - var x_as_uint: u256 = comptime @bitCast(u256, x); + var x_as_uint: u256 = comptime @as(u256, @bitCast(x)); - try expect(x.f == @bitCast(FloatT, x_as_uint).f); - try expect(x.x == @bitCast(FloatT, x_as_uint).x); + try expect(x.f == @as(FloatT, @bitCast(x_as_uint)).f); + try expect(x.x == @as(FloatT, @bitCast(x_as_uint)).x); } test "bitcast vector to integer and back" { @@ -398,20 +398,20 @@ test "bitcast vector to integer and back" { const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14; var x = @splat(16, true); x[1] = false; - try expect(@bitCast(u16, x) == comptime @bitCast(u16, @as(@Vector(16, bool), arr))); + try expect(@as(u16, @bitCast(x)) == comptime @as(u16, @bitCast(@as(@Vector(16, bool), arr)))); } fn bitCastWrapper16(x: f16) u16 { - return @bitCast(u16, x); + return @as(u16, @bitCast(x)); } fn bitCastWrapper32(x: f32) u32 { - return @bitCast(u32, x); + return @as(u32, @bitCast(x)); } fn bitCastWrapper64(x: f64) u64 { - return @bitCast(u64, x); + return @as(u64, @bitCast(x)); } fn bitCastWrapper128(x: f128) u128 { - return @bitCast(u128, x); + return @as(u128, @bitCast(x)); } test "bitcast nan float does modify signaling bit" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -425,37 +425,37 @@ test "bitcast nan float does modify signaling bit" { // 16 bit const snan_f16_const = math.nan_f16; - try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_const)); + try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_const))); try 
expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_const)); var snan_f16_var = math.nan_f16; - try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_var)); + try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_var))); try expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_var)); // 32 bit const snan_f32_const = math.nan_f32; - try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_const)); + try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_const))); try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_const)); var snan_f32_var = math.nan_f32; - try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_var)); + try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_var))); try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_var)); // 64 bit const snan_f64_const = math.nan_f64; - try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_const)); + try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_const))); try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_const)); var snan_f64_var = math.nan_f64; - try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_var)); + try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_var))); try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_var)); // 128 bit const snan_f128_const = math.nan_f128; - try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_const)); + try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_const))); try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_const)); var snan_f128_var = math.nan_f128; - try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_var)); + try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_var))); try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_var)); } diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig index e19a560a9d2d..722edef25ebf 100644 --- a/test/behavior/bitreverse.zig +++ b/test/behavior/bitreverse.zig @@ -62,20 +62,20 @@ fn testBitReverse() !void { // using comptime_ints, signed, positive try 
expect(@bitReverse(@as(u8, 0)) == 0); - try expect(@bitReverse(@bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49))); - try expect(@bitReverse(@bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48))); - try expect(@bitReverse(@bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f))); - try expect(@bitReverse(@bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48))); - try expect(@bitReverse(@bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48))); - try expect(@bitReverse(@bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48))); - try expect(@bitReverse(@bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48))); + try expect(@bitReverse(@as(i8, @bitCast(@as(u8, 0x92)))) == @as(i8, @bitCast(@as(u8, 0x49)))); + try expect(@bitReverse(@as(i16, @bitCast(@as(u16, 0x1234)))) == @as(i16, @bitCast(@as(u16, 0x2c48)))); + try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x123456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c48)))); + try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x12345f)))) == @as(i24, @bitCast(@as(u24, 0xfa2c48)))); + try 
expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0xf23456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c4f)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x12345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c48)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0xf2345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c4f)))); + try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x1234567f)))) == @as(i32, @bitCast(@as(u32, 0xfe6a2c48)))); + try expect(@bitReverse(@as(i40, @bitCast(@as(u40, 0x123456789a)))) == @as(i40, @bitCast(@as(u40, 0x591e6a2c48)))); + try expect(@bitReverse(@as(i48, @bitCast(@as(u48, 0x123456789abc)))) == @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48)))); + try expect(@bitReverse(@as(i56, @bitCast(@as(u56, 0x123456789abcde)))) == @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i64, @bitCast(@as(u64, 0x123456789abcdef1)))) == @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141)))) == @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48)))); + try expect(@bitReverse(@as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181)))) == @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48)))); // using signed, negative. Compare to runtime ints returned from llvm. 
var neg8: i8 = -18; diff --git a/test/behavior/bool.zig b/test/behavior/bool.zig index 50a098c11190..5d09e5f8a00d 100644 --- a/test/behavior/bool.zig +++ b/test/behavior/bool.zig @@ -15,8 +15,8 @@ test "cast bool to int" { const f = false; try expectEqual(@as(u32, 1), @intFromBool(t)); try expectEqual(@as(u32, 0), @intFromBool(f)); - try expectEqual(-1, @bitCast(i1, @intFromBool(t))); - try expectEqual(0, @bitCast(i1, @intFromBool(f))); + try expectEqual(-1, @as(i1, @bitCast(@intFromBool(t)))); + try expectEqual(0, @as(i1, @bitCast(@intFromBool(f)))); try expectEqual(u1, @TypeOf(@intFromBool(t))); try expectEqual(u1, @TypeOf(@intFromBool(f))); try nonConstCastIntFromBool(t, f); @@ -25,8 +25,8 @@ test "cast bool to int" { fn nonConstCastIntFromBool(t: bool, f: bool) !void { try expectEqual(@as(u32, 1), @intFromBool(t)); try expectEqual(@as(u32, 0), @intFromBool(f)); - try expectEqual(@as(i1, -1), @bitCast(i1, @intFromBool(t))); - try expectEqual(@as(i1, 0), @bitCast(i1, @intFromBool(f))); + try expectEqual(@as(i1, -1), @as(i1, @bitCast(@intFromBool(t)))); + try expectEqual(@as(i1, 0), @as(i1, @bitCast(@intFromBool(f)))); try expectEqual(u1, @TypeOf(@intFromBool(t))); try expectEqual(u1, @TypeOf(@intFromBool(f))); } diff --git a/test/behavior/bugs/11995.zig b/test/behavior/bugs/11995.zig index 0ee8e562147a..fe554bc4bf6c 100644 --- a/test/behavior/bugs/11995.zig +++ b/test/behavior/bugs/11995.zig @@ -25,7 +25,7 @@ test { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var string: [5]u8 = "hello".*; - const arg_data = wuffs_base__slice_u8{ .ptr = @ptrCast([*c]u8, &string), .len = string.len }; + const arg_data = wuffs_base__slice_u8{ .ptr = @as([*c]u8, @ptrCast(&string)), .len = string.len }; var arg_meta = wuffs_base__io_buffer_meta{ .wi = 1, .ri = 2, .pos = 3, .closed = true }; wuffs_base__make_io_buffer(arg_data, &arg_meta); try std.testing.expectEqualStrings("wello", arg_data.ptr[0..arg_data.len]); diff --git a/test/behavior/bugs/12051.zig 
b/test/behavior/bugs/12051.zig index 5509ab97cd53..342e851b775a 100644 --- a/test/behavior/bugs/12051.zig +++ b/test/behavior/bugs/12051.zig @@ -30,8 +30,8 @@ const Y = struct { return .{ .a = 0, .b = false, - .c = @bitCast(Z, @as(u32, 0)), - .d = @bitCast(Z, @as(u32, 0)), + .c = @as(Z, @bitCast(@as(u32, 0))), + .d = @as(Z, @bitCast(@as(u32, 0))), }; } }; diff --git a/test/behavior/bugs/12119.zig b/test/behavior/bugs/12119.zig index bb5167a3da12..6cfb015eb0a3 100644 --- a/test/behavior/bugs/12119.zig +++ b/test/behavior/bugs/12119.zig @@ -12,6 +12,6 @@ test { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const zerox32: u8x32 = [_]u8{0} ** 32; - const bigsum: u32x8 = @bitCast(u32x8, zerox32); + const bigsum: u32x8 = @as(u32x8, @bitCast(zerox32)); try std.testing.expectEqual(0, @reduce(.Add, bigsum)); } diff --git a/test/behavior/bugs/12450.zig b/test/behavior/bugs/12450.zig index db91529051a5..5ab6565f3c9e 100644 --- a/test/behavior/bugs/12450.zig +++ b/test/behavior/bugs/12450.zig @@ -16,7 +16,7 @@ test { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - var f1: *align(16) Foo = @alignCast(16, @ptrCast(*align(1) Foo, &buffer[0])); + var f1: *align(16) Foo = @alignCast(@as(*align(1) Foo, @ptrCast(&buffer[0]))); try expect(@typeInfo(@TypeOf(f1)).Pointer.alignment == 16); try expect(@intFromPtr(f1) == @intFromPtr(&f1.a)); try expect(@typeInfo(@TypeOf(&f1.a)).Pointer.alignment == 16); diff --git a/test/behavior/bugs/12723.zig b/test/behavior/bugs/12723.zig index abecf890256e..955cc11c119f 100644 --- a/test/behavior/bugs/12723.zig +++ b/test/behavior/bugs/12723.zig @@ -3,6 +3,6 @@ const expect = @import("std").testing.expect; test "Non-exhaustive enum backed by comptime_int" { const E = enum(comptime_int) { a, b, c, _ }; comptime var e: E = .a; - e = @enumFromInt(E, 378089457309184723749); + e = @as(E, @enumFromInt(378089457309184723749)); try 
expect(@intFromEnum(e) == 378089457309184723749); } diff --git a/test/behavior/bugs/13664.zig b/test/behavior/bugs/13664.zig index 34f6e9110ba9..b0ea3f70af73 100644 --- a/test/behavior/bugs/13664.zig +++ b/test/behavior/bugs/13664.zig @@ -21,7 +21,7 @@ test { const timestamp: i64 = value(); const id = ID{ .fields = Fields{ - .timestamp = @intCast(u50, timestamp), + .timestamp = @as(u50, @intCast(timestamp)), .random_bits = 420, } }; try std.testing.expect((ID{ .value = id.value }).fields.timestamp == timestamp); diff --git a/test/behavior/bugs/421.zig b/test/behavior/bugs/421.zig index 1ed4a6673806..f92bfb989949 100644 --- a/test/behavior/bugs/421.zig +++ b/test/behavior/bugs/421.zig @@ -16,6 +16,6 @@ fn testBitCastArray() !void { } fn extractOne64(a: u128) u64 { - const x = @bitCast([2]u64, a); + const x = @as([2]u64, @bitCast(a)); return x[1]; } diff --git a/test/behavior/bugs/6781.zig b/test/behavior/bugs/6781.zig index 2f5d7a38073c..aac0c31a11fc 100644 --- a/test/behavior/bugs/6781.zig +++ b/test/behavior/bugs/6781.zig @@ -23,7 +23,7 @@ pub const JournalHeader = packed struct { var target: [32]u8 = undefined; std.crypto.hash.Blake3.hash(entry[checksum_offset + checksum_size ..], target[0..], .{}); - return @bitCast(u128, target[0..checksum_size].*); + return @as(u128, @bitCast(target[0..checksum_size].*)); } pub fn calculate_hash_chain_root(self: *const JournalHeader) u128 { @@ -42,16 +42,16 @@ pub const JournalHeader = packed struct { assert(prev_hash_chain_root_offset + prev_hash_chain_root_size == checksum_offset); - const header = @bitCast([@sizeOf(JournalHeader)]u8, self.*); + const header = @as([@sizeOf(JournalHeader)]u8, @bitCast(self.*)); const source = header[prev_hash_chain_root_offset .. 
checksum_offset + checksum_size]; assert(source.len == prev_hash_chain_root_size + checksum_size); var target: [32]u8 = undefined; std.crypto.hash.Blake3.hash(source, target[0..], .{}); if (segfault) { - return @bitCast(u128, target[0..hash_chain_root_size].*); + return @as(u128, @bitCast(target[0..hash_chain_root_size].*)); } else { var array = target[0..hash_chain_root_size].*; - return @bitCast(u128, array); + return @as(u128, @bitCast(array)); } } diff --git a/test/behavior/bugs/718.zig b/test/behavior/bugs/718.zig index b0f0d1ec5239..0dad101e4b28 100644 --- a/test/behavior/bugs/718.zig +++ b/test/behavior/bugs/718.zig @@ -15,7 +15,7 @@ test "zero keys with @memset" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - @memset(@ptrCast([*]u8, &keys)[0..@sizeOf(@TypeOf(keys))], 0); + @memset(@as([*]u8, @ptrCast(&keys))[0..@sizeOf(@TypeOf(keys))], 0); try expect(!keys.up); try expect(!keys.down); try expect(!keys.left); diff --git a/test/behavior/bugs/726.zig b/test/behavior/bugs/726.zig index 0cd8abc1cfe8..37e8d31cc942 100644 --- a/test/behavior/bugs/726.zig +++ b/test/behavior/bugs/726.zig @@ -8,7 +8,7 @@ test "@ptrCast from const to nullable" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const c: u8 = 4; - var x: ?*const u8 = @ptrCast(?*const u8, &c); + var x: ?*const u8 = @as(?*const u8, @ptrCast(&c)); try expect(x.?.* == 4); } @@ -21,6 +21,6 @@ test "@ptrCast from var in empty struct to nullable" { const container = struct { var c: u8 = 4; }; - var x: ?*const u8 = @ptrCast(?*const u8, &container.c); + var x: ?*const u8 = @as(?*const u8, @ptrCast(&container.c)); try expect(x.?.* == 4); } diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig index ae369c4e9d44..1eb2ef3049ff 100644 --- a/test/behavior/builtin_functions_returning_void_or_noreturn.zig +++ 
b/test/behavior/builtin_functions_returning_void_or_noreturn.zig @@ -17,8 +17,8 @@ test { try testing.expectEqual(void, @TypeOf(@breakpoint())); try testing.expectEqual({}, @export(x, .{ .name = "x" })); try testing.expectEqual({}, @fence(.Acquire)); - try testing.expectEqual({}, @memcpy(@ptrFromInt([*]u8, 1)[0..0], @ptrFromInt([*]u8, 1)[0..0])); - try testing.expectEqual({}, @memset(@ptrFromInt([*]u8, 1)[0..0], undefined)); + try testing.expectEqual({}, @memcpy(@as([*]u8, @ptrFromInt(1))[0..0], @as([*]u8, @ptrFromInt(1))[0..0])); + try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined)); try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {})); try testing.expectEqual({}, @prefetch(&val, .{})); try testing.expectEqual({}, @setAlignStack(16)); diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig index 8d28285d2795..ce33834ffa7d 100644 --- a/test/behavior/byteswap.zig +++ b/test/behavior/byteswap.zig @@ -16,13 +16,13 @@ test "@byteSwap integers" { try t(u8, 0x12, 0x12); try t(u16, 0x1234, 0x3412); try t(u24, 0x123456, 0x563412); - try t(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2); - try t(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412))); + try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2); + try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412)))); try t(u32, 0x12345678, 0x78563412); - try t(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2); - try t(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412))); + try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2); + try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412)))); try t(u40, 0x123456789a, 0x9a78563412); - try t(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412))); + try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); try t(u56, 0x123456789abcde, 0xdebc9a78563412); try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412); try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412); @@ 
-31,19 +31,19 @@ test "@byteSwap integers" { try t(u0, @as(u0, 0), 0); try t(i8, @as(i8, -50), -50); - try t(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412))); - try t(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412))); - try t(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412))); - try t(u40, @bitCast(i40, @as(u40, 0x123456789a)), @as(u40, 0x9a78563412)); - try t(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412))); - try t(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412))); - try t(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412))); - try t(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412))); - try t(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412))); + try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412)))); + try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412)))); + try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412)))); + try t(u40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(u40, 0x9a78563412)); + try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412)))); + try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412)))); + try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412)))); + try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412)))); + try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412)))); try t( i128, - @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), - 
@bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)), + @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), + @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412))), ); } fn t(comptime I: type, input: I, expected_output: I) !void { diff --git a/test/behavior/call.zig b/test/behavior/call.zig index 627df37e9bb2..633f5e9c3ffe 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -368,7 +368,7 @@ test "Enum constructed by @Type passed as generic argument" { } }; inline for (@typeInfo(S.E).Enum.fields, 0..) |_, i| { - try S.foo(@enumFromInt(S.E, i), i); + try S.foo(@as(S.E, @enumFromInt(i)), i); } } diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index d51d864ea1cf..e6aa53bd4122 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -10,13 +10,13 @@ const native_endian = builtin.target.cpu.arch.endian(); test "int to ptr cast" { const x = @as(usize, 13); - const y = @ptrFromInt(*u8, x); + const y = @as(*u8, @ptrFromInt(x)); const z = @intFromPtr(y); try expect(z == 13); } test "integer literal to pointer cast" { - const vga_mem = @ptrFromInt(*u16, 0xB8000); + const vga_mem = @as(*u16, @ptrFromInt(0xB8000)); try expect(@intFromPtr(vga_mem) == 0xB8000); } @@ -52,7 +52,7 @@ fn testResolveUndefWithInt(b: bool, x: i32) !void { } test "@intCast to comptime_int" { - try expect(@intCast(comptime_int, 0) == 0); + try expect(@as(comptime_int, @intCast(0)) == 0); } test "implicit cast comptime numbers to any type when the value fits" { @@ -68,29 +68,29 @@ test "implicit cast comptime_int to comptime_float" { test "comptime_int @floatFromInt" { { - const result = @floatFromInt(f16, 1234); + const result = @as(f16, @floatFromInt(1234)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatFromInt(f32, 1234); + const result = @as(f32, @floatFromInt(1234)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } { - const result = @floatFromInt(f64, 1234); 
+ const result = @as(f64, @floatFromInt(1234)); try expect(@TypeOf(result) == f64); try expect(result == 1234.0); } { - const result = @floatFromInt(f128, 1234); + const result = @as(f128, @floatFromInt(1234)); try expect(@TypeOf(result) == f128); try expect(result == 1234.0); } // big comptime_int (> 64 bits) to f128 conversion { - const result = @floatFromInt(f128, 0x1_0000_0000_0000_0000); + const result = @as(f128, @floatFromInt(0x1_0000_0000_0000_0000)); try expect(@TypeOf(result) == f128); try expect(result == 0x1_0000_0000_0000_0000.0); } @@ -107,8 +107,8 @@ test "@floatFromInt" { } fn testIntToFloat(k: i32) !void { - const f = @floatFromInt(f32, k); - const i = @intFromFloat(i32, f); + const f = @as(f32, @floatFromInt(k)); + const i = @as(i32, @intFromFloat(f)); try expect(i == k); } }; @@ -131,8 +131,8 @@ test "@floatFromInt(f80)" { fn testIntToFloat(comptime Int: type, k: Int) !void { @setRuntimeSafety(false); // TODO - const f = @floatFromInt(f80, k); - const i = @intFromFloat(Int, f); + const f = @as(f80, @floatFromInt(k)); + const i = @as(Int, @intFromFloat(f)); try expect(i == k); } }; @@ -165,7 +165,7 @@ test "@intFromFloat" { fn testIntFromFloats() !void { const x = @as(i32, 1e4); try expect(x == 10000); - const y = @intFromFloat(i32, @as(f32, 1e4)); + const y = @as(i32, @intFromFloat(@as(f32, 1e4))); try expect(y == 10000); try expectIntFromFloat(f32, 255.1, u8, 255); try expectIntFromFloat(f32, 127.2, i8, 127); @@ -173,7 +173,7 @@ fn testIntFromFloats() !void { } fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void { - try expect(@intFromFloat(I, f) == i); + try expect(@as(I, @intFromFloat(f)) == i); } test "implicitly cast indirect pointer to maybe-indirect pointer" { @@ -208,29 +208,29 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" { } test "@intCast comptime_int" { - const result = @intCast(i32, 1234); + const result = @as(i32, @intCast(1234)); try expect(@TypeOf(result) == i32); try expect(result == 
1234); } test "@floatCast comptime_int and comptime_float" { { - const result = @floatCast(f16, 1234); + const result = @as(f16, @floatCast(1234)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatCast(f16, 1234.0); + const result = @as(f16, @floatCast(1234.0)); try expect(@TypeOf(result) == f16); try expect(result == 1234.0); } { - const result = @floatCast(f32, 1234); + const result = @as(f32, @floatCast(1234)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } { - const result = @floatCast(f32, 1234.0); + const result = @as(f32, @floatCast(1234.0)); try expect(@TypeOf(result) == f32); try expect(result == 1234.0); } @@ -276,21 +276,21 @@ test "*usize to *void" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var i = @as(usize, 0); - var v = @ptrCast(*void, &i); + var v = @as(*void, @ptrCast(&i)); v.* = {}; } test "@enumFromInt passed a comptime_int to an enum with one item" { const E = enum { A }; - const x = @enumFromInt(E, 0); + const x = @as(E, @enumFromInt(0)); try expect(x == E.A); } test "@intCast to u0 and use the result" { const S = struct { fn doTheTest(zero: u1, one: u1, bigzero: i32) !void { - try expect((one << @intCast(u0, bigzero)) == 1); - try expect((zero << @intCast(u0, bigzero)) == 0); + try expect((one << @as(u0, @intCast(bigzero))) == 1); + try expect((zero << @as(u0, @intCast(bigzero))) == 0); } }; try S.doTheTest(0, 1, 0); @@ -605,7 +605,7 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" { const window_name = [1][*]const u8{"window name"}; const x: [*]const ?[*]const u8 = &window_name; - try expect(mem.eql(u8, std.mem.sliceTo(@ptrCast([*:0]const u8, x[0].?), 0), "window name")); + try expect(mem.eql(u8, std.mem.sliceTo(@as([*:0]const u8, @ptrCast(x[0].?)), 0), "window name")); } test "vector casts" { @@ -625,9 +625,9 @@ test "vector casts" { var up3 = @as(@Vector(2, u64), up0); // Downcast (safety-checked) var down0 = up3; - var down1 = 
@intCast(@Vector(2, u32), down0); - var down2 = @intCast(@Vector(2, u16), down0); - var down3 = @intCast(@Vector(2, u8), down0); + var down1 = @as(@Vector(2, u32), @intCast(down0)); + var down2 = @as(@Vector(2, u16), @intCast(down0)); + var down3 = @as(@Vector(2, u8), @intCast(down0)); try expect(mem.eql(u16, &@as([2]u16, up1), &[2]u16{ 0x55, 0xaa })); try expect(mem.eql(u32, &@as([2]u32, up2), &[2]u32{ 0x55, 0xaa })); @@ -660,12 +660,12 @@ test "@floatCast cast down" { { var double: f64 = 0.001534; - var single = @floatCast(f32, double); + var single = @as(f32, @floatCast(double)); try expect(single == 0.001534); } { const double: f64 = 0.001534; - const single = @floatCast(f32, double); + const single = @as(f32, @floatCast(double)); try expect(single == 0.001534); } } @@ -1041,7 +1041,7 @@ test "cast between C pointer with different but compatible types" { } fn doTheTest() !void { var x = [_]u16{ 4, 2, 1, 3 }; - try expect(foo(@ptrCast([*]u16, &x)) == 4); + try expect(foo(@as([*]u16, @ptrCast(&x))) == 4); } }; try S.doTheTest(); @@ -1093,10 +1093,10 @@ test "peer type resolve array pointer and unknown pointer" { test "comptime float casts" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - const a = @floatFromInt(comptime_float, 1); + const a = @as(comptime_float, @floatFromInt(1)); try expect(a == 1); try expect(@TypeOf(a) == comptime_float); - const b = @intFromFloat(comptime_int, 2); + const b = @as(comptime_int, @intFromFloat(2)); try expect(b == 2); try expect(@TypeOf(b) == comptime_int); @@ -1111,7 +1111,7 @@ test "pointer reinterpret const float to int" { // The hex representation is 0x3fe3333333333303. 
const float: f64 = 5.99999999999994648725e-01; const float_ptr = &float; - const int_ptr = @ptrCast(*const i32, float_ptr); + const int_ptr = @as(*const i32, @ptrCast(float_ptr)); const int_val = int_ptr.*; if (native_endian == .Little) try expect(int_val == 0x33333303) @@ -1134,7 +1134,7 @@ test "implicit cast from [*]T to ?*anyopaque" { fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void { var n: usize = 0; while (n < len) : (n += 1) { - @ptrCast([*]u8, array.?)[n] += 1; + @as([*]u8, @ptrCast(array.?))[n] += 1; } } @@ -1146,7 +1146,7 @@ test "compile time int to ptr of function" { // On some architectures function pointers must be aligned. const hardcoded_fn_addr = maxInt(usize) & ~@as(usize, 0xf); -pub const FUNCTION_CONSTANT = @ptrFromInt(PFN_void, hardcoded_fn_addr); +pub const FUNCTION_CONSTANT = @as(PFN_void, @ptrFromInt(hardcoded_fn_addr)); pub const PFN_void = *const fn (*anyopaque) callconv(.C) void; fn foobar(func: PFN_void) !void { @@ -1161,10 +1161,10 @@ test "implicit ptr to *anyopaque" { var a: u32 = 1; var ptr: *align(@alignOf(u32)) anyopaque = &a; - var b: *u32 = @ptrCast(*u32, ptr); + var b: *u32 = @as(*u32, @ptrCast(ptr)); try expect(b.* == 1); var ptr2: ?*align(@alignOf(u32)) anyopaque = &a; - var c: *u32 = @ptrCast(*u32, ptr2.?); + var c: *u32 = @as(*u32, @ptrCast(ptr2.?)); try expect(c.* == 1); } @@ -1235,11 +1235,11 @@ fn testCast128() !void { } fn cast128Int(x: f128) u128 { - return @bitCast(u128, x); + return @as(u128, @bitCast(x)); } fn cast128Float(x: u128) f128 { - return @bitCast(f128, x); + return @as(f128, @bitCast(x)); } test "implicit cast from *[N]T to ?[*]T" { @@ -1270,7 +1270,7 @@ test "implicit cast from *T to ?*anyopaque" { } fn incrementVoidPtrValue(value: ?*anyopaque) void { - @ptrCast(*u8, value.?).* += 1; + @as(*u8, @ptrCast(value.?)).* += 1; } test "implicit cast *[0]T to E![]const u8" { @@ -1284,11 +1284,11 @@ test "implicit cast *[0]T to E![]const u8" { var global_array: [4]u8 = undefined; test "cast from array 
reference to fn: comptime fn ptr" { - const f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array); + const f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array)); try expect(@intFromPtr(f) == @intFromPtr(&global_array)); } test "cast from array reference to fn: runtime fn ptr" { - var f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array); + var f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array)); try expect(@intFromPtr(f) == @intFromPtr(&global_array)); } @@ -1337,7 +1337,7 @@ test "assignment to optional pointer result loc" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct }; - try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct)); + try expect(foo.ptr.? == @as(*anyopaque, @ptrCast(&global_struct))); } test "cast between *[N]void and []void" { @@ -1393,9 +1393,9 @@ test "cast f128 to narrower types" { const S = struct { fn doTheTest() !void { var x: f128 = 1234.0; - try expect(@as(f16, 1234.0) == @floatCast(f16, x)); - try expect(@as(f32, 1234.0) == @floatCast(f32, x)); - try expect(@as(f64, 1234.0) == @floatCast(f64, x)); + try expect(@as(f16, 1234.0) == @as(f16, @floatCast(x))); + try expect(@as(f32, 1234.0) == @as(f32, @floatCast(x))); + try expect(@as(f64, 1234.0) == @as(f64, @floatCast(x))); } }; try S.doTheTest(); @@ -1500,8 +1500,8 @@ test "coerce between pointers of compatible differently-named floats" { } test "peer type resolution of const and non-const pointer to array" { - const a = @ptrFromInt(*[1024]u8, 42); - const b = @ptrFromInt(*const [1024]u8, 42); + const a = @as(*[1024]u8, @ptrFromInt(42)); + const b = @as(*const [1024]u8, @ptrFromInt(42)); try std.testing.expect(@TypeOf(a, b) == *const [1024]u8); try std.testing.expect(a == b); } @@ -1512,7 +1512,7 @@ test "intFromFloat to zero-bit int" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const a: f32 = 
0.0; - try comptime std.testing.expect(@intFromFloat(u0, a) == 0); + try comptime std.testing.expect(@as(u0, @intFromFloat(a)) == 0); } test "peer type resolution of function pointer and function body" { @@ -1547,10 +1547,10 @@ test "bitcast packed struct with u0" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const S = packed struct(u2) { a: u0, b: u2 }; - const s = @bitCast(S, @as(u2, 2)); + const s = @as(S, @bitCast(@as(u2, 2))); try expect(s.a == 0); try expect(s.b == 2); - const i = @bitCast(u2, s); + const i = @as(u2, @bitCast(s)); try expect(i == 2); } @@ -1560,7 +1560,7 @@ test "optional pointer coerced to optional allowzero pointer" { var p: ?*u32 = undefined; var q: ?*allowzero u32 = undefined; - p = @ptrFromInt(*u32, 4); + p = @as(*u32, @ptrFromInt(4)); q = p; try expect(@intFromPtr(q.?) == 4); } @@ -1583,7 +1583,7 @@ test "peer type resolution forms error union" { 0 => unreachable, 42 => error.AccessDenied, else => unreachable, - } else @intCast(u32, foo); + } else @as(u32, @intCast(foo)); try expect(try result == 123); } @@ -1623,8 +1623,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice" const S = struct { fn doTheTest(comptime T: type, comptime s: T) !void { - var a: [:s]const T = @ptrFromInt(*const [2:s]T, 0x1000); - var b: []T = @ptrFromInt(*[3]T, 0x2000); + var a: [:s]const T = @as(*const [2:s]T, @ptrFromInt(0x1000)); + var b: []T = @as(*[3]T, @ptrFromInt(0x2000)); comptime assert(@TypeOf(a, b) == []const T); comptime assert(@TypeOf(b, a) == []const T); @@ -1634,8 +1634,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice" const R = @TypeOf(r1); - try expectEqual(@as(R, @ptrFromInt(*const [2:s]T, 0x1000)), r1); - try expectEqual(@as(R, @ptrFromInt(*const [3]T, 0x2000)), r2); + try expectEqual(@as(R, @as(*const [2:s]T, @ptrFromInt(0x1000))), r1); + try expectEqual(@as(R, @as(*const [3]T, @ptrFromInt(0x2000))), r2); } }; @@ -1815,7 +1815,7 @@ test "peer 
type resolution: three-way resolution combines error set and optional const E = error{Foo}; var a: E = error.Foo; - var b: *const [5:0]u8 = @ptrFromInt(*const [5:0]u8, 0x1000); + var b: *const [5:0]u8 = @as(*const [5:0]u8, @ptrFromInt(0x1000)); var c: ?[*:0]u8 = null; comptime assert(@TypeOf(a, b, c) == E!?[*:0]const u8); comptime assert(@TypeOf(a, c, b) == E!?[*:0]const u8); @@ -1844,7 +1844,7 @@ test "peer type resolution: three-way resolution combines error set and optional const T = @TypeOf(r1); try expectEqual(@as(T, error.Foo), r1); - try expectEqual(@as(T, @ptrFromInt([*:0]u8, 0x1000)), r2); + try expectEqual(@as(T, @as([*:0]u8, @ptrFromInt(0x1000))), r2); try expectEqual(@as(T, null), r3); } @@ -2114,7 +2114,7 @@ test "peer type resolution: many compatible pointers" { 4 => "foo-4", else => unreachable, }; - try expectEqualSlices(u8, expected, std.mem.span(@ptrCast([*:0]const u8, r))); + try expectEqualSlices(u8, expected, std.mem.span(@as([*:0]const u8, @ptrCast(r)))); } } diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig index 041ee193e88d..6d4f53040903 100644 --- a/test/behavior/cast_int.zig +++ b/test/behavior/cast_int.zig @@ -11,6 +11,6 @@ test "@intCast i32 to u7" { var x: u128 = maxInt(u128); var y: i32 = 120; - var z = x >> @intCast(u7, y); + var z = x >> @as(u7, @intCast(y)); try expect(z == 0xff); } diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index d327afb783e1..b0c5e9c91eb4 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -6,7 +6,7 @@ const ptr_size = @sizeOf(usize); test "type pun signed and unsigned as single pointer" { comptime { var x: u32 = 0; - const y = @ptrCast(*i32, &x); + const y = @as(*i32, @ptrCast(&x)); y.* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } @@ -15,7 +15,7 @@ test "type pun signed and unsigned as single pointer" { test "type pun signed and unsigned as many pointer" { comptime { var x: u32 = 0; - const y = 
@ptrCast([*]i32, &x); + const y = @as([*]i32, @ptrCast(&x)); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } @@ -24,7 +24,7 @@ test "type pun signed and unsigned as many pointer" { test "type pun signed and unsigned as array pointer" { comptime { var x: u32 = 0; - const y = @ptrCast(*[1]i32, &x); + const y = @as(*[1]i32, @ptrCast(&x)); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } @@ -38,7 +38,7 @@ test "type pun signed and unsigned as offset many pointer" { comptime { var x: u32 = 0; - var y = @ptrCast([*]i32, &x); + var y = @as([*]i32, @ptrCast(&x)); y -= 10; y[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); @@ -53,7 +53,7 @@ test "type pun signed and unsigned as array pointer with pointer arithemtic" { comptime { var x: u32 = 0; - const y = @ptrCast([*]i32, &x) - 10; + const y = @as([*]i32, @ptrCast(&x)) - 10; const z: *[15]i32 = y[0..15]; z[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); @@ -64,9 +64,9 @@ test "type pun value and struct" { comptime { const StructOfU32 = extern struct { x: u32 }; var inst: StructOfU32 = .{ .x = 0 }; - @ptrCast(*i32, &inst.x).* = -1; + @as(*i32, @ptrCast(&inst.x)).* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), inst.x); - @ptrCast(*i32, &inst).* = -2; + @as(*i32, @ptrCast(&inst)).* = -2; try testing.expectEqual(@as(u32, 0xFFFFFFFE), inst.x); } } @@ -81,8 +81,8 @@ test "type pun endianness" { comptime { const StructOfBytes = extern struct { x: [4]u8 }; var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } }; - const structPtr = @ptrCast(*align(1) u32, &inst); - const arrayPtr = @ptrCast(*align(1) u32, &inst.x); + const structPtr = @as(*align(1) u32, @ptrCast(&inst)); + const arrayPtr = @as(*align(1) u32, @ptrCast(&inst.x)); inst.x[0] = 0xFE; inst.x[2] = 0xBE; try testing.expectEqual(bigToNativeEndian(u32, 0xFE00BE00), structPtr.*); @@ -124,8 +124,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize { @compileError("Mismatched sizes! 
" ++ @typeName(From) ++ " and " ++ @typeName(To) ++ " must have the same size!"); const array_len = @divExact(ptr_size, @sizeOf(From)); var result: usize = 0; - const pSource = @ptrCast(*align(1) const [array_len]From, &ptr); - const pResult = @ptrCast(*align(1) [array_len]To, &result); + const pSource = @as(*align(1) const [array_len]From, @ptrCast(&ptr)); + const pResult = @as(*align(1) [array_len]To, @ptrCast(&result)); var i: usize = 0; while (i < array_len) : (i += 1) { inline for (@typeInfo(To).Struct.fields) |f| { @@ -136,8 +136,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize { } fn doTypePunBitsTest(as_bits: *Bits) !void { - const as_u32 = @ptrCast(*align(1) u32, as_bits); - const as_bytes = @ptrCast(*[4]u8, as_bits); + const as_u32 = @as(*align(1) u32, @ptrCast(as_bits)); + const as_bytes = @as(*[4]u8, @ptrCast(as_bits)); as_u32.* = bigToNativeEndian(u32, 0xB0A7DEED); try testing.expectEqual(@as(u1, 0x00), as_bits.p0); try testing.expectEqual(@as(u4, 0x08), as_bits.p1); @@ -176,7 +176,7 @@ test "type pun bits" { comptime { var v: u32 = undefined; - try doTypePunBitsTest(@ptrCast(*Bits, &v)); + try doTypePunBitsTest(@as(*Bits, @ptrCast(&v))); } } @@ -194,7 +194,7 @@ test "basic pointer preservation" { comptime { const lazy_address = @intFromPtr(&imports.global_u32); try testing.expectEqual(@intFromPtr(&imports.global_u32), lazy_address); - try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, lazy_address)); + try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(lazy_address))); } } @@ -207,8 +207,8 @@ test "byte copy preserves linker value" { const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; - const pSource = @ptrCast(*const [ptr_size]u8, &lazy); - const pResult = @ptrCast(*[ptr_size]u8, &result); + const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy)); + const pResult = @as(*[ptr_size]u8, @ptrCast(&result)); var i: usize = 0; while (i < ptr_size) : (i += 1) 
{ pResult[i] = pSource[i]; @@ -230,8 +230,8 @@ test "unordered byte copy preserves linker value" { const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; - const pSource = @ptrCast(*const [ptr_size]u8, &lazy); - const pResult = @ptrCast(*[ptr_size]u8, &result); + const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy)); + const pResult = @as(*[ptr_size]u8, @ptrCast(&result)); if (ptr_size > 8) @compileError("This array needs to be expanded for platform with very big pointers"); const shuffled_indices = [_]usize{ 4, 5, 2, 6, 1, 3, 0, 7 }; for (shuffled_indices) |i| { @@ -274,12 +274,12 @@ test "dance on linker values" { arr[0] = @intFromPtr(&imports.global_u32); arr[1] = @intFromPtr(&imports.global_u32); - const weird_ptr = @ptrCast([*]Bits, @ptrCast([*]u8, &arr) + @sizeOf(usize) - 3); + const weird_ptr = @as([*]Bits, @ptrCast(@as([*]u8, @ptrCast(&arr)) + @sizeOf(usize) - 3)); try doTypePunBitsTest(&weird_ptr[0]); if (ptr_size > @sizeOf(Bits)) try doTypePunBitsTest(&weird_ptr[1]); - var arr_bytes = @ptrCast(*[2][ptr_size]u8, &arr); + var arr_bytes = @as(*[2][ptr_size]u8, @ptrCast(&arr)); var rebuilt_bytes: [ptr_size]u8 = undefined; var i: usize = 0; @@ -290,7 +290,7 @@ test "dance on linker values" { rebuilt_bytes[i] = arr_bytes[1][i]; } - try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, @bitCast(usize, rebuilt_bytes))); + try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(@as(usize, @bitCast(rebuilt_bytes))))); } } @@ -316,7 +316,7 @@ test "offset array ptr by element size" { try testing.expectEqual(@intFromPtr(&arr[2]), address + 2 * @sizeOf(VirtualStruct)); try testing.expectEqual(@intFromPtr(&arr[3]), address + @sizeOf(VirtualStruct) * 3); - const secondElement = @ptrFromInt(*VirtualStruct, @intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct)); + const secondElement = @as(*VirtualStruct, @ptrFromInt(@intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct))); try 
testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x); } } @@ -334,15 +334,15 @@ test "offset instance by field size" { var ptr = @intFromPtr(&inst); ptr -= 4; ptr += @offsetOf(VirtualStruct, "x"); - try testing.expectEqual(@as(u32, 0), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 0), @as([*]u32, @ptrFromInt(ptr))[1]); ptr -= @offsetOf(VirtualStruct, "x"); ptr += @offsetOf(VirtualStruct, "y"); - try testing.expectEqual(@as(u32, 1), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 1), @as([*]u32, @ptrFromInt(ptr))[1]); ptr = ptr - @offsetOf(VirtualStruct, "y") + @offsetOf(VirtualStruct, "z"); - try testing.expectEqual(@as(u32, 2), @ptrFromInt([*]u32, ptr)[1]); + try testing.expectEqual(@as(u32, 2), @as([*]u32, @ptrFromInt(ptr))[1]); ptr = @intFromPtr(&inst.z) - 4 - @offsetOf(VirtualStruct, "z"); ptr += @offsetOf(VirtualStruct, "w"); - try testing.expectEqual(@as(u32, 3), @ptrFromInt(*u32, ptr + 4).*); + try testing.expectEqual(@as(u32, 3), @as(*u32, @ptrFromInt(ptr + 4)).*); } } @@ -363,13 +363,13 @@ test "offset field ptr by enclosing array element size" { var i: usize = 0; while (i < 4) : (i += 1) { - var ptr: [*]u8 = @ptrCast([*]u8, &arr[0]); + var ptr: [*]u8 = @as([*]u8, @ptrCast(&arr[0])); ptr += i; ptr += @offsetOf(VirtualStruct, "x"); var j: usize = 0; while (j < 4) : (j += 1) { const base = ptr + j * @sizeOf(VirtualStruct); - try testing.expectEqual(@intCast(u8, i * 4 + j), base[0]); + try testing.expectEqual(@as(u8, @intCast(i * 4 + j)), base[0]); } } } @@ -393,7 +393,7 @@ test "accessing reinterpreted memory of parent object" { .c = 2.6, }; const ptr = &x.b[0]; - const b = @ptrCast([*c]const u8, ptr)[5]; + const b = @as([*c]const u8, @ptrCast(ptr))[5]; try testing.expect(b == expected); } } @@ -407,11 +407,11 @@ test "bitcast packed union to integer" { comptime { const a = U{ .x = 1 }; const b = U{ .y = 2 }; - const cast_a = @bitCast(u2, a); - const cast_b = @bitCast(u2, b); + const cast_a = @as(u2, 
@bitCast(a)); + const cast_b = @as(u2, @bitCast(b)); // truncated because the upper bit is garbage memory that we don't care about - try testing.expectEqual(@as(u1, 1), @truncate(u1, cast_a)); + try testing.expectEqual(@as(u1, 1), @as(u1, @truncate(cast_a))); try testing.expectEqual(@as(u2, 2), cast_b); } } @@ -435,6 +435,6 @@ test "dereference undefined pointer to zero-bit type" { test "type pun extern struct" { const S = extern struct { f: u8 }; comptime var s = S{ .f = 123 }; - @ptrCast(*u8, &s).* = 72; + @as(*u8, @ptrCast(&s)).* = 72; try testing.expectEqual(@as(u8, 72), s.f); } diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 1076f5e3ea34..ffb254f765cf 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -20,7 +20,7 @@ test "enum to int" { } fn testIntToEnumEval(x: i32) !void { - try expect(@enumFromInt(IntToEnumNumber, x) == IntToEnumNumber.Three); + try expect(@as(IntToEnumNumber, @enumFromInt(x)) == IntToEnumNumber.Three); } const IntToEnumNumber = enum { Zero, One, Two, Three, Four }; @@ -629,7 +629,7 @@ test "non-exhaustive enum" { .b => true, _ => false, }); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(switch (e) { .a => false, .b => false, @@ -648,9 +648,9 @@ test "non-exhaustive enum" { }); try expect(@typeInfo(E).Enum.fields.len == 2); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(@intFromEnum(e) == 12); - e = @enumFromInt(E, y); + e = @as(E, @enumFromInt(y)); try expect(@intFromEnum(e) == 52); try expect(@typeInfo(E).Enum.is_exhaustive == false); } @@ -666,7 +666,7 @@ test "empty non-exhaustive enum" { const E = enum(u8) { _ }; fn doTheTest(y: u8) !void { - var e = @enumFromInt(E, y); + var e = @as(E, @enumFromInt(y)); try expect(switch (e) { _ => true, }); @@ -693,7 +693,7 @@ test "single field non-exhaustive enum" { .a => true, _ => false, }); - e = @enumFromInt(E, 12); + e = @as(E, @enumFromInt(12)); try expect(switch (e) { .a => false, _ => true, @@ -709,7 +709,7 @@ 
test "single field non-exhaustive enum" { else => false, }); - try expect(@intFromEnum(@enumFromInt(E, y)) == y); + try expect(@intFromEnum(@as(E, @enumFromInt(y))) == y); try expect(@typeInfo(E).Enum.fields.len == 1); try expect(@typeInfo(E).Enum.is_exhaustive == false); } @@ -741,8 +741,8 @@ const MultipleChoice2 = enum(u32) { }; test "cast integer literal to enum" { - try expect(@enumFromInt(MultipleChoice2, 0) == MultipleChoice2.Unspecified1); - try expect(@enumFromInt(MultipleChoice2, 40) == MultipleChoice2.B); + try expect(@as(MultipleChoice2, @enumFromInt(0)) == MultipleChoice2.Unspecified1); + try expect(@as(MultipleChoice2, @enumFromInt(40)) == MultipleChoice2.B); } test "enum with specified and unspecified tag values" { @@ -1155,7 +1155,7 @@ test "size of enum with only one tag which has explicit integer tag type" { var s1: S1 = undefined; s1.e = .nope; try expect(s1.e == .nope); - const ptr = @ptrCast(*u8, &s1); + const ptr = @as(*u8, @ptrCast(&s1)); try expect(ptr.* == 10); var s0: S0 = undefined; @@ -1183,7 +1183,7 @@ test "Non-exhaustive enum with nonstandard int size behaves correctly" { test "runtime int to enum with one possible value" { const E = enum { one }; var runtime: usize = 0; - if (@enumFromInt(E, runtime) != .one) { + if (@as(E, @enumFromInt(runtime)) != .one) { @compileError("test failed"); } } @@ -1194,7 +1194,7 @@ test "enum tag from a local variable" { return enum(Inner) { _ }; } }; - const i = @enumFromInt(S.Int(u32), 0); + const i = @as(S.Int(u32), @enumFromInt(0)); try std.testing.expect(@intFromEnum(i) == 0); } @@ -1203,12 +1203,12 @@ test "auto-numbered enum with signed tag type" { try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a)); try std.testing.expectEqual(@as(i32, 1), @intFromEnum(E.b)); - try std.testing.expectEqual(E.a, @enumFromInt(E, 0)); - try std.testing.expectEqual(E.b, @enumFromInt(E, 1)); - try std.testing.expectEqual(E.a, @enumFromInt(E, @as(i32, 0))); - try std.testing.expectEqual(E.b, @enumFromInt(E, 
@as(i32, 1))); - try std.testing.expectEqual(E.a, @enumFromInt(E, @as(u32, 0))); - try std.testing.expectEqual(E.b, @enumFromInt(E, @as(u32, 1))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(0))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(1))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(i32, 0)))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(i32, 1)))); + try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(u32, 0)))); + try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(u32, 1)))); try std.testing.expectEqualStrings("a", @tagName(E.a)); try std.testing.expectEqualStrings("b", @tagName(E.b)); } diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 14b0eca03084..06062ac66cfd 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -234,9 +234,9 @@ const Set1 = error{ A, B }; const Set2 = error{ A, C }; fn testExplicitErrorSetCast(set1: Set1) !void { - var x = @errSetCast(Set2, set1); + var x = @as(Set2, @errSetCast(set1)); try expect(@TypeOf(x) == Set2); - var y = @errSetCast(Set1, x); + var y = @as(Set1, @errSetCast(x)); try expect(@TypeOf(y) == Set1); try expect(y == error.A); } diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 85dc5e29b5d6..f2b91e66ac79 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -9,7 +9,7 @@ test "compile time recursion" { try expect(some_data.len == 21); } -var some_data: [@intCast(usize, fibonacci(7))]u8 = undefined; +var some_data: [@as(usize, @intCast(fibonacci(7)))]u8 = undefined; fn fibonacci(x: i32) i32 { if (x <= 1) return 1; return fibonacci(x - 1) + fibonacci(x - 2); @@ -123,7 +123,7 @@ fn fnWithSetRuntimeSafety() i32 { test "compile-time downcast when the bits fit" { comptime { const spartan_count: u16 = 255; - const byte = @intCast(u8, spartan_count); + const byte = @as(u8, @intCast(spartan_count)); try expect(byte == 255); } } @@ -149,7 +149,7 @@ test "a type constructed in a global expression" { 
l.array[0] = 10; l.array[1] = 11; l.array[2] = 12; - const ptr = @ptrCast([*]u8, &l.array); + const ptr = @as([*]u8, @ptrCast(&l.array)); try expect(ptr[0] == 10); try expect(ptr[1] == 11); try expect(ptr[2] == 12); @@ -332,7 +332,7 @@ fn generateTable(comptime T: type) [1010]T { var res: [1010]T = undefined; var i: usize = 0; while (i < 1010) : (i += 1) { - res[i] = @intCast(T, i); + res[i] = @as(T, @intCast(i)); } return res; } @@ -460,7 +460,7 @@ test "binary math operator in partially inlined function" { var b: [16]u8 = undefined; for (&b, 0..) |*r, i| - r.* = @intCast(u8, i + 1); + r.* = @as(u8, @intCast(i + 1)); copyWithPartialInline(s[0..], b[0..]); try expect(s[0] == 0x1020304); @@ -942,7 +942,7 @@ test "comptime pointer load through elem_ptr" { .x = i, }; } - var ptr = @ptrCast([*]S, &array); + var ptr = @as([*]S, @ptrCast(&array)); var x = ptr[0].x; assert(x == 0); ptr += 1; @@ -1281,9 +1281,9 @@ test "comptime write through extern struct reinterpreted as array" { c: u8, }; var s: S = undefined; - @ptrCast(*[3]u8, &s)[0] = 1; - @ptrCast(*[3]u8, &s)[1] = 2; - @ptrCast(*[3]u8, &s)[2] = 3; + @as(*[3]u8, @ptrCast(&s))[0] = 1; + @as(*[3]u8, @ptrCast(&s))[1] = 2; + @as(*[3]u8, @ptrCast(&s))[2] = 3; assert(s.a == 1); assert(s.b == 2); assert(s.c == 3); @@ -1371,7 +1371,7 @@ test "lazy value is resolved as slice operand" { var a: [512]u64 = undefined; const ptr1 = a[0..@sizeOf(A)]; - const ptr2 = @ptrCast([*]u8, &a)[0..@sizeOf(A)]; + const ptr2 = @as([*]u8, @ptrCast(&a))[0..@sizeOf(A)]; try expect(@intFromPtr(ptr1) == @intFromPtr(ptr2)); try expect(ptr1.len == ptr2.len); } diff --git a/test/behavior/export.zig b/test/behavior/export.zig index 4928e8672542..4751ccafe58c 100644 --- a/test/behavior/export.zig +++ b/test/behavior/export.zig @@ -7,7 +7,7 @@ const builtin = @import("builtin"); // can't really run this test but we can make sure it has no compile error // and generates code -const vram = @ptrFromInt([*]volatile u8, 0x20000000)[0..0x8000]; +const vram = 
@as([*]volatile u8, @ptrFromInt(0x20000000))[0..0x8000]; export fn writeToVRam() void { vram[0] = 'X'; } diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 56f3885a4aa5..e21645ae8fe7 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -94,7 +94,7 @@ test "negative f128 intFromFloat at compile-time" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const a: f128 = -2; - var b = @intFromFloat(i64, a); + var b = @as(i64, @intFromFloat(a)); try expect(@as(i64, -2) == b); } @@ -387,11 +387,11 @@ fn testLog() !void { } { var a: f32 = e; - try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff))); + try expect(@log(a) == 1 or @log(a) == @as(f32, @bitCast(@as(u32, 0x3f7fffff)))); } { var a: f64 = e; - try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000))); + try expect(@log(a) == 1 or @log(a) == @as(f64, @bitCast(@as(u64, 0x3ff0000000000000)))); } inline for ([_]type{ f16, f32, f64 }) |ty| { const eps = epsForType(ty); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 6c7e12796441..e7b7e63e33b0 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -326,7 +326,7 @@ test "function pointers" { &fn4, }; for (fns, 0..) 
|f, i| { - try expect(f() == @intCast(u32, i) + 5); + try expect(f() == @as(u32, @intCast(i)) + 5); } } fn fn1() u32 { @@ -512,8 +512,8 @@ test "using @ptrCast on function pointers" { fn run() !void { const a = A{ .data = "abcd".* }; - const casted_fn = @ptrCast(*const fn (*const anyopaque, usize) *const u8, &at); - const casted_impl = @ptrCast(*const anyopaque, &a); + const casted_fn = @as(*const fn (*const anyopaque, usize) *const u8, @ptrCast(&at)); + const casted_impl = @as(*const anyopaque, @ptrCast(&a)); const ptr = casted_fn(casted_impl, 2); try expect(ptr.* == 'c'); } @@ -575,7 +575,7 @@ test "lazy values passed to anytype parameter" { try B.foo(.{ .x = @sizeOf(B) }); const C = struct {}; - try expect(@truncate(u32, @sizeOf(C)) == 0); + try expect(@as(u32, @truncate(@sizeOf(C))) == 0); const D = struct {}; try expect(@sizeOf(D) << 1 == 0); diff --git a/test/behavior/fn_in_struct_in_comptime.zig b/test/behavior/fn_in_struct_in_comptime.zig index b31b377c048b..0acadbc5ea9c 100644 --- a/test/behavior/fn_in_struct_in_comptime.zig +++ b/test/behavior/fn_in_struct_in_comptime.zig @@ -14,5 +14,5 @@ fn get_foo() fn (*u8) usize { test "define a function in an anonymous struct in comptime" { const foo = get_foo(); - try expect(foo(@ptrFromInt(*u8, 12345)) == 12345); + try expect(foo(@as(*u8, @ptrFromInt(12345))) == 12345); } diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 12b82c44a4b4..f751d35d966c 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -84,7 +84,7 @@ test "basic for loop" { } for (array, 0..) |item, index| { _ = item; - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } const array_ptr = &array; @@ -94,7 +94,7 @@ test "basic for loop" { } for (array_ptr, 0..) 
|item, index| { _ = item; - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } const unknown_size: []const u8 = &array; @@ -103,7 +103,7 @@ test "basic for loop" { buf_index += 1; } for (unknown_size, 0..) |_, index| { - buffer[buf_index] = @intCast(u8, index); + buffer[buf_index] = @as(u8, @intCast(index)); buf_index += 1; } @@ -208,7 +208,7 @@ test "for on slice with allowzero ptr" { const S = struct { fn doTheTest(slice: []const u8) !void { - var ptr = @ptrCast([*]allowzero const u8, slice.ptr)[0..slice.len]; + var ptr = @as([*]allowzero const u8, @ptrCast(slice.ptr))[0..slice.len]; for (ptr, 0..) |x, i| try expect(x == i + 1); for (ptr, 0..) |*x, i| try expect(x.* == i + 1); } @@ -393,7 +393,7 @@ test "raw pointer and counter" { const ptr: [*]u8 = &buf; for (ptr, 0..4) |*a, b| { - a.* = @intCast(u8, 'A' + b); + a.* = @as(u8, @intCast('A' + b)); } try expect(buf[0] == 'A'); diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index f0c8516f6721..7d4a841a62a6 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -97,7 +97,7 @@ test "type constructed by comptime function call" { l.array[0] = 10; l.array[1] = 11; l.array[2] = 12; - const ptr = @ptrCast([*]u8, &l.array); + const ptr = @as([*]u8, @ptrCast(&l.array)); try expect(ptr[0] == 10); try expect(ptr[1] == 11); try expect(ptr[2] == 12); @@ -171,7 +171,7 @@ fn getByte(ptr: ?*const u8) u8 { return ptr.?.*; } fn getFirstByte(comptime T: type, mem: []const T) u8 { - return getByte(@ptrCast(*const u8, &mem[0])); + return getByte(@as(*const u8, @ptrCast(&mem[0]))); } test "generic fn keeps non-generic parameter types" { @@ -428,7 +428,7 @@ test "null sentinel pointer passed as generic argument" { try std.testing.expect(@intFromPtr(a) == 8); } }; - try S.doTheTest((@ptrFromInt([*:null]const [*c]const u8, 8))); + try S.doTheTest((@as([*:null]const [*c]const u8, @ptrFromInt(8)))); } test "generic function passed as comptime 
argument" { diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig index 6fd2c192a250..42f0b00922ad 100644 --- a/test/behavior/int128.zig +++ b/test/behavior/int128.zig @@ -38,7 +38,7 @@ test "undefined 128 bit int" { var undef: u128 = undefined; var undef_signed: i128 = undefined; - try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @bitCast(u128, undef_signed) == undef); + try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @as(u128, @bitCast(undef_signed)) == undef); } test "int128" { @@ -49,7 +49,7 @@ test "int128" { var buff: i128 = -1; try expect(buff < 0 and (buff + 1) == 0); - try expect(@intCast(i8, buff) == @as(i8, -1)); + try expect(@as(i8, @intCast(buff)) == @as(i8, -1)); buff = minInt(i128); try expect(buff < 0); @@ -73,16 +73,16 @@ test "truncate int128" { { var buff: u128 = maxInt(u128); - try expect(@truncate(u64, buff) == maxInt(u64)); - try expect(@truncate(u90, buff) == maxInt(u90)); - try expect(@truncate(u128, buff) == maxInt(u128)); + try expect(@as(u64, @truncate(buff)) == maxInt(u64)); + try expect(@as(u90, @truncate(buff)) == maxInt(u90)); + try expect(@as(u128, @truncate(buff)) == maxInt(u128)); } { var buff: i128 = maxInt(i128); - try expect(@truncate(i64, buff) == -1); - try expect(@truncate(i90, buff) == -1); - try expect(@truncate(i128, buff) == maxInt(i128)); + try expect(@as(i64, @truncate(buff)) == -1); + try expect(@as(i90, @truncate(buff)) == -1); + try expect(@as(i128, @truncate(buff)) == maxInt(i128)); } } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 42c328c7d4d8..3b5d4876fd35 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -391,11 +391,11 @@ test "binary not 128-bit" { break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; }); try expect(comptime x: { - break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)); + break :x ~@as(i128, 
0x55555555_55555555_55555555_55555555) == @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); }); try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa); - try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); + try testBinaryNot128(i128, @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)))); } fn testBinaryNot128(comptime Type: type, x: Type) !void { @@ -1156,29 +1156,29 @@ test "quad hex float literal parsing accurate" { // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing. const expected: u128 = 0x3fff1111222233334444555566667777; - try expect(@bitCast(u128, a) == expected); + try expect(@as(u128, @bitCast(a)) == expected); // non-normalized const b: f128 = 0x11.111222233334444555566667777p-4; - try expect(@bitCast(u128, b) == expected); + try expect(@as(u128, @bitCast(b)) == expected); const S = struct { fn doTheTest() !void { { var f: f128 = 0x1.2eab345678439abcdefea56782346p+5; - try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234); + try expect(@as(u128, @bitCast(f)) == 0x40042eab345678439abcdefea5678234); } { var f: f128 = 0x1.edcb34a235253948765432134674fp-1; - try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134675); // round-to-even + try expect(@as(u128, @bitCast(f)) == 0x3ffeedcb34a235253948765432134675); // round-to-even } { var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50; - try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50); + try expect(@as(u128, @bitCast(f)) == 0x3fcd353e45674d89abacc3a2ebf3ff50); } { var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9; - try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568); + try expect(@as(u128, @bitCast(f)) == 0x3ff6ed8764648369535adf4be3214568); } const exp2ft = [_]f64{ 0x1.6a09e667f3bcdp-1, @@ -1233,7 +1233,7 @@ test "quad hex float literal parsing accurate" { }; for (exp2ft, 0..) 
|x, i| { - try expect(@bitCast(u64, x) == answers[i]); + try expect(@as(u64, @bitCast(x)) == answers[i]); } } }; @@ -1586,7 +1586,7 @@ test "signed zeros are represented properly" { fn testOne(comptime T: type) !void { const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); var as_fp_val = -@as(T, 0.0); - var as_uint_val = @bitCast(ST, as_fp_val); + var as_uint_val = @as(ST, @bitCast(as_fp_val)); // Ensure the sign bit is set. try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1); } diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig index 3a87b66fb1c6..f1776dfe570c 100644 --- a/test/behavior/memcpy.zig +++ b/test/behavior/memcpy.zig @@ -59,7 +59,7 @@ fn testMemcpyDestManyPtr() !void { var str = "hello".*; var buf: [5]u8 = undefined; var len: usize = 5; - @memcpy(@ptrCast([*]u8, &buf), @ptrCast([*]const u8, &str)[0..len]); + @memcpy(@as([*]u8, @ptrCast(&buf)), @as([*]const u8, @ptrCast(&str))[0..len]); try expect(buf[0] == 'h'); try expect(buf[1] == 'e'); try expect(buf[2] == 'l'); diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 037fee74ee26..12cc027ef4de 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -166,7 +166,7 @@ test "correct sizeOf and offsets in packed structs" { try expectEqual(4, @sizeOf(PStruct)); if (native_endian == .Little) { - const s1 = @bitCast(PStruct, @as(u32, 0x12345678)); + const s1 = @as(PStruct, @bitCast(@as(u32, 0x12345678))); try expectEqual(false, s1.bool_a); try expectEqual(false, s1.bool_b); try expectEqual(false, s1.bool_c); @@ -180,7 +180,7 @@ test "correct sizeOf and offsets in packed structs" { try expectEqual(@as(u10, 0b1101000101), s1.u10_a); try expectEqual(@as(u10, 0b0001001000), s1.u10_b); - const s2 = @bitCast(packed struct { x: u1, y: u7, z: u24 }, @as(u32, 0xd5c71ff4)); + const s2 = @as(packed struct { x: u1, y: u7, z: u24 }, @bitCast(@as(u32, 0xd5c71ff4))); try expectEqual(@as(u1, 0), s2.x); try expectEqual(@as(u7, 
0b1111010), s2.y); try expectEqual(@as(u24, 0xd5c71f), s2.z); @@ -207,7 +207,7 @@ test "nested packed structs" { try expectEqual(24, @bitOffsetOf(S3, "y")); if (native_endian == .Little) { - const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3; + const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3; try expectEqual(@as(u8, 0xf4), s3.x.a); try expectEqual(@as(u8, 0x1f), s3.x.b); try expectEqual(@as(u8, 0xc7), s3.x.c); @@ -600,7 +600,7 @@ test "packed struct initialized in bitcast" { const T = packed struct { val: u8 }; var val: u8 = 123; - const t = @bitCast(u8, T{ .val = val }); + const t = @as(u8, @bitCast(T{ .val = val })); try expect(t == val); } @@ -627,7 +627,7 @@ test "pointer to container level packed struct field" { }, var arr = [_]u32{0} ** 2; }; - @ptrCast(*S, &S.arr[0]).other_bits.enable_3 = true; + @as(*S, @ptrCast(&S.arr[0])).other_bits.enable_3 = true; try expect(S.arr[0] == 0x10000000); } diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig index 62dd178fd5b6..9e476572abd6 100644 --- a/test/behavior/packed_struct_explicit_backing_int.zig +++ b/test/behavior/packed_struct_explicit_backing_int.zig @@ -25,7 +25,7 @@ test "packed struct explicit backing integer" { try expectEqual(24, @bitOffsetOf(S3, "y")); if (native_endian == .Little) { - const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3; + const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3; try expectEqual(@as(u8, 0xf4), s3.x.a); try expectEqual(@as(u8, 0x1f), s3.x.b); try expectEqual(@as(u8, 0xc7), s3.x.c); diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 4e04fe580c23..d007e7b48082 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -184,8 +184,8 @@ test "implicit cast error unions with non-optional to optional pointer" { } test "compare equality of optional and non-optional pointer" { - const a = @ptrFromInt(*const usize, 0x12345678); - const b 
= @ptrFromInt(?*usize, 0x12345678); + const a = @as(*const usize, @ptrFromInt(0x12345678)); + const b = @as(?*usize, @ptrFromInt(0x12345678)); try expect(a == b); try expect(b == a); } @@ -197,7 +197,7 @@ test "allowzero pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - var ptr = @ptrFromInt([*]allowzero i32, 0); + var ptr = @as([*]allowzero i32, @ptrFromInt(0)); var opt_ptr: ?[*]allowzero i32 = ptr; try expect(opt_ptr != null); try expect(@intFromPtr(ptr) == 0); @@ -286,9 +286,9 @@ test "null terminated pointer" { const S = struct { fn doTheTest() !void { var array_with_zero = [_:0]u8{ 'h', 'e', 'l', 'l', 'o' }; - var zero_ptr: [*:0]const u8 = @ptrCast([*:0]const u8, &array_with_zero); + var zero_ptr: [*:0]const u8 = @as([*:0]const u8, @ptrCast(&array_with_zero)); var no_zero_ptr: [*]const u8 = zero_ptr; - var zero_ptr_again = @ptrCast([*:0]const u8, no_zero_ptr); + var zero_ptr_again = @as([*:0]const u8, @ptrCast(no_zero_ptr)); try expect(std.mem.eql(u8, std.mem.sliceTo(zero_ptr_again, 0), "hello")); } }; @@ -367,7 +367,7 @@ test "pointer sentinel with +inf" { } test "pointer to array at fixed address" { - const array = @ptrFromInt(*volatile [2]u32, 0x10); + const array = @as(*volatile [2]u32, @ptrFromInt(0x10)); // Silly check just to reference `array` try expect(@intFromPtr(&array[0]) == 0x10); try expect(@intFromPtr(&array[1]) == 0x14); @@ -406,13 +406,13 @@ test "pointer arithmetic affects the alignment" { test "@intFromPtr on null optional at comptime" { { - const pointer = @ptrFromInt(?*u8, 0x000); + const pointer = @as(?*u8, @ptrFromInt(0x000)); const x = @intFromPtr(pointer); _ = x; try comptime expect(0 == @intFromPtr(pointer)); } { - const pointer = @ptrFromInt(?*u8, 0xf00); + const pointer = @as(?*u8, @ptrFromInt(0xf00)); try comptime expect(0xf00 == @intFromPtr(pointer)); } } @@ -463,8 +463,8 @@ test "element pointer arithmetic to 
slice" { }; const elem_ptr = &cases[0]; // *[2]i32 - const many = @ptrCast([*][2]i32, elem_ptr); - const many_elem = @ptrCast(*[2]i32, &many[1]); + const many = @as([*][2]i32, @ptrCast(elem_ptr)); + const many_elem = @as(*[2]i32, @ptrCast(&many[1])); const items: []i32 = many_elem; try testing.expect(items.len == 2); try testing.expect(items[1] == 3); @@ -512,7 +512,7 @@ test "ptrCast comptime known slice to C pointer" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const s: [:0]const u8 = "foo"; - var p = @ptrCast([*c]const u8, s); + var p = @as([*c]const u8, @ptrCast(s)); try std.testing.expectEqualStrings(s, std.mem.sliceTo(p, 0)); } @@ -550,7 +550,7 @@ test "pointer to array has explicit alignment" { const Base = extern struct { a: u8 }; const Base2 = extern struct { a: u8 }; fn func(ptr: *[4]Base) *align(1) [4]Base2 { - return @alignCast(1, @ptrCast(*[4]Base2, ptr)); + return @alignCast(@as(*[4]Base2, @ptrCast(ptr))); } }; var bases = [_]S.Base{.{ .a = 2 }} ** 4; diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig index 51146b14c8c7..da152d4dc525 100644 --- a/test/behavior/popcount.zig +++ b/test/behavior/popcount.zig @@ -63,7 +63,7 @@ fn testPopCountIntegers() !void { try expect(@popCount(x) == 2); } comptime { - try expect(@popCount(@bitCast(u8, @as(i8, -120))) == 2); + try expect(@popCount(@as(u8, @bitCast(@as(i8, -120)))) == 2); } } diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index aadae132d969..3a2ec9db190f 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -16,7 +16,7 @@ fn testReinterpretBytesAsInteger() !void { .Little => 0xab785634, .Big => 0x345678ab, }; - try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected); + try expect(@as(*align(1) const u32, @ptrCast(bytes[1..5])).* == expected); } test "reinterpret an array over multiple elements, with no well-defined layout" { @@ -32,7 +32,7 @@ test "reinterpret an array over multiple elements, with no 
well-defined layout" fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void { const bytes: ?[5]?u8 = [5]?u8{ 0x12, 0x34, 0x56, 0x78, 0x9a }; const ptr = &bytes.?[1]; - const copy: [4]?u8 = @ptrCast(*const [4]?u8, ptr).*; + const copy: [4]?u8 = @as(*const [4]?u8, @ptrCast(ptr)).*; _ = copy; //try expect(@ptrCast(*align(1)?u8, bytes[1..5]).* == ); } @@ -51,7 +51,7 @@ fn testReinterpretStructWrappedBytesAsInteger() !void { .Little => 0xab785634, .Big => 0x345678ab, }; - try expect(@ptrCast(*align(1) const u32, obj.bytes[1..5]).* == expected); + try expect(@as(*align(1) const u32, @ptrCast(obj.bytes[1..5])).* == expected); } test "reinterpret bytes of an array into an extern struct" { @@ -71,7 +71,7 @@ fn testReinterpretBytesAsExternStruct() !void { c: u8, }; - var ptr = @ptrCast(*const S, &bytes); + var ptr = @as(*const S, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -95,7 +95,7 @@ fn testReinterpretExternStructAsExternStruct() !void { a: u32 align(2), c: u8, }; - var ptr = @ptrCast(*const S2, &bytes); + var ptr = @as(*const S2, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -121,7 +121,7 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void { a2: u16, c: u8, }; - var ptr = @ptrCast(*const S2, &bytes); + var ptr = @as(*const S2, @ptrCast(&bytes)); var val = ptr.c; try expect(val == 5); } @@ -138,13 +138,13 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" { a: u32 align(2), c: u8, }; - comptime var ptr = @ptrCast(*const S, &bytes); + comptime var ptr = @as(*const S, @ptrCast(&bytes)); var val = &ptr.c; try expect(val.* == 5); // Test lowering an elem ptr comptime var src_value = S{ .a = 15, .c = 5 }; - comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value); + comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value)); var val2 = &ptr2[4]; try expect(val2.* == 5); } @@ -161,13 +161,13 @@ test "lower reinterpreted comptime field ptr" { a: u32, c: u8, }; - comptime var ptr = 
@ptrCast(*const S, &bytes); + comptime var ptr = @as(*const S, @ptrCast(&bytes)); var val = &ptr.c; try expect(val.* == 5); // Test lowering an elem ptr comptime var src_value = S{ .a = 15, .c = 5 }; - comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value); + comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value)); var val2 = &ptr2[4]; try expect(val2.* == 5); } @@ -190,27 +190,17 @@ const Bytes = struct { pub fn init(v: u32) Bytes { var res: Bytes = undefined; - @ptrCast(*align(1) u32, &res.bytes).* = v; + @as(*align(1) u32, @ptrCast(&res.bytes)).* = v; return res; } }; -test "comptime ptrcast keeps larger alignment" { - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - - comptime { - const a: u32 = 1234; - const p = @ptrCast([*]const u8, &a); - try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8); - } -} - test "ptrcast of const integer has the correct object size" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const is_value = ~@intCast(isize, std.math.minInt(isize)); - const is_bytes = @ptrCast([*]const u8, &is_value)[0..@sizeOf(isize)]; + const is_value = ~@as(isize, @intCast(std.math.minInt(isize))); + const is_bytes = @as([*]const u8, @ptrCast(&is_value))[0..@sizeOf(isize)]; if (@sizeOf(isize) == 8) { switch (native_endian) { .Little => { @@ -248,7 +238,7 @@ test "implicit optional pointer to optional anyopaque pointer" { var buf: [4]u8 = "aoeu".*; var x: ?[*]u8 = &buf; var y: ?*anyopaque = x; - var z = @ptrCast(*[4]u8, y); + var z = @as(*[4]u8, @ptrCast(y)); try expect(std.mem.eql(u8, z, "aoeu")); } @@ -260,7 +250,7 @@ test "@ptrCast slice to slice" { const S = struct { fn foo(slice: []u32) []i32 { - return @ptrCast([]i32, slice); + return @as([]i32, @ptrCast(slice)); } }; var buf: [4]u32 = .{ 0, 0, 0, 0 }; @@ -277,7 +267,7 @@ test "comptime @ptrCast a subset of an array, then write through it" { comptime { var buff: [16]u8 align(4) = undefined; - const len_bytes = 
@ptrCast(*u32, &buff); + const len_bytes = @as(*u32, @ptrCast(&buff)); len_bytes.* = 16; std.mem.copy(u8, buff[4..], "abcdef"); } @@ -286,7 +276,7 @@ test "comptime @ptrCast a subset of an array, then write through it" { test "@ptrCast undefined value at comptime" { const S = struct { fn transmute(comptime T: type, comptime U: type, value: T) U { - return @ptrCast(*const U, &value).*; + return @as(*const U, @ptrCast(&value)).*; } }; comptime { diff --git a/test/behavior/ptrfromint.zig b/test/behavior/ptrfromint.zig index c07a6df8347a..72244aa7d122 100644 --- a/test/behavior/ptrfromint.zig +++ b/test/behavior/ptrfromint.zig @@ -9,7 +9,7 @@ test "casting integer address to function pointer" { fn addressToFunction() void { var addr: usize = 0xdeadbee0; - _ = @ptrFromInt(*const fn () void, addr); + _ = @as(*const fn () void, @ptrFromInt(addr)); } test "mutate through ptr initialized with constant ptrFromInt value" { @@ -21,7 +21,7 @@ test "mutate through ptr initialized with constant ptrFromInt value" { } fn forceCompilerAnalyzeBranchHardCodedPtrDereference(x: bool) void { - const hardCodedP = @ptrFromInt(*volatile u8, 0xdeadbeef); + const hardCodedP = @as(*volatile u8, @ptrFromInt(0xdeadbeef)); if (x) { hardCodedP.* = hardCodedP.* | 10; } else { @@ -34,7 +34,7 @@ test "@ptrFromInt creates null pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const ptr = @ptrFromInt(?*u32, 0); + const ptr = @as(?*u32, @ptrFromInt(0)); try expectEqual(@as(?*u32, null), ptr); } @@ -43,6 +43,6 @@ test "@ptrFromInt creates allowzero zero pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - const ptr = @ptrFromInt(*allowzero u32, 0); + const ptr = @as(*allowzero u32, @ptrFromInt(0)); try expectEqual(@as(usize, 0), @intFromPtr(ptr)); } diff --git 
a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index 3657e77e5065..a161be66eb05 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -231,7 +231,7 @@ test "@sizeOf comparison against zero" { test "hardcoded address in typeof expression" { const S = struct { - fn func() @TypeOf(@ptrFromInt(*[]u8, 0x10).*[0]) { + fn func() @TypeOf(@as(*[]u8, @ptrFromInt(0x10)).*[0]) { return 0; } }; @@ -252,7 +252,7 @@ test "array access of generic param in typeof expression" { test "lazy size cast to float" { { const S = struct { a: u8 }; - try expect(@floatFromInt(f32, @sizeOf(S)) == 1.0); + try expect(@as(f32, @floatFromInt(@sizeOf(S))) == 1.0); } { const S = struct { a: u8 }; diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index fcbae214ace0..4316aca34fa5 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -129,7 +129,7 @@ test "generic malloc free" { } var some_mem: [100]u8 = undefined; fn memAlloc(comptime T: type, n: usize) anyerror![]T { - return @ptrCast([*]T, &some_mem[0])[0..n]; + return @as([*]T, @ptrCast(&some_mem[0]))[0..n]; } fn memFree(comptime T: type, memory: []T) void { _ = memory; @@ -138,7 +138,7 @@ fn memFree(comptime T: type, memory: []T) void { test "slice of hardcoded address to pointer" { const S = struct { fn doTheTest() !void { - const pointer = @ptrFromInt([*]u8, 0x04)[0..2]; + const pointer = @as([*]u8, @ptrFromInt(0x04))[0..2]; try comptime expect(@TypeOf(pointer) == *[2]u8); const slice: []const u8 = pointer; try expect(@intFromPtr(slice.ptr) == 4); @@ -152,7 +152,7 @@ test "slice of hardcoded address to pointer" { test "comptime slice of pointer preserves comptime var" { comptime { var buff: [10]u8 = undefined; - var a = @ptrCast([*]u8, &buff); + var a = @as([*]u8, @ptrCast(&buff)); a[0..1][0] = 1; try expect(buff[0..][0..][0] == 1); } @@ -161,7 +161,7 @@ test "comptime slice of pointer preserves comptime var" { test "comptime pointer cast array and 
then slice" { const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; - const ptrA: [*]const u8 = @ptrCast([*]const u8, &array); + const ptrA: [*]const u8 = @as([*]const u8, @ptrCast(&array)); const sliceA: []const u8 = ptrA[0..2]; const ptrB: [*]const u8 = &array; @@ -188,7 +188,7 @@ test "slicing pointer by length" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; - const ptr: [*]const u8 = @ptrCast([*]const u8, &array); + const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array)); const slice = ptr[1..][0..5]; try expect(slice.len == 5); var i: usize = 0; @@ -197,7 +197,7 @@ test "slicing pointer by length" { } } -const x = @ptrFromInt([*]i32, 0x1000)[0..0x500]; +const x = @as([*]i32, @ptrFromInt(0x1000))[0..0x500]; const y = x[0x100..]; test "compile time slice of pointer to hard coded address" { try expect(@intFromPtr(x) == 0x1000); @@ -262,7 +262,7 @@ test "C pointer slice access" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var buf: [10]u32 = [1]u32{42} ** 10; - const c_ptr = @ptrCast([*c]const u32, &buf); + const c_ptr = @as([*c]const u32, @ptrCast(&buf)); var runtime_zero: usize = 0; try comptime expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1])); @@ -352,7 +352,7 @@ test "@ptrCast slice to pointer" { fn doTheTest() !void { var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff }; var slice: []align(@alignOf(u16)) u8 = &array; - var ptr = @ptrCast(*u16, slice); + var ptr = @as(*u16, @ptrCast(slice)); try expect(ptr.* == 65535); } }; @@ -837,13 +837,13 @@ test "empty slice ptr is non null" { { const empty_slice: []u8 = &[_]u8{}; const p: [*]u8 = empty_slice.ptr + 0; - const t = @ptrCast([*]i8, p); + const t = @as([*]i8, @ptrCast(p)); try expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr)); } { const empty_slice: []u8 = &.{}; const p: [*]u8 = empty_slice.ptr + 0; - const t = @ptrCast([*]i8, p); + const t = @as([*]i8, @ptrCast(p)); try 
expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr)); } } diff --git a/test/behavior/slice_sentinel_comptime.zig b/test/behavior/slice_sentinel_comptime.zig index 368860547e27..31b7e2349eeb 100644 --- a/test/behavior/slice_sentinel_comptime.zig +++ b/test/behavior/slice_sentinel_comptime.zig @@ -25,7 +25,7 @@ test "comptime slice-sentinel in bounds (unterminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -41,7 +41,7 @@ test "comptime slice-sentinel in bounds (unterminated)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -82,7 +82,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..13 :0xff]; _ = slice; } @@ -98,7 +98,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..13 :0xff]; _ = slice; } @@ -139,7 +139,7 @@ test "comptime slice-sentinel in bounds (terminated)" { // vector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -155,7 +155,7 @@ test "comptime slice-sentinel in bounds (terminated)" { // 
cvector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..3 :'d']; _ = slice; } @@ -196,7 +196,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" { // vector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @as([*]u8, @ptrCast(&buf)); const slice = target[0..14 :0]; _ = slice; } @@ -212,7 +212,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" { // cvector_ConstPtrSpecialRef comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf)); const slice = target[0..14 :0]; _ = slice; } diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 6ced42998e6b..95b2718efd19 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -92,7 +92,7 @@ test "structs" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var foo: StructFoo = undefined; - @memset(@ptrCast([*]u8, &foo)[0..@sizeOf(StructFoo)], 0); + @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0); foo.a += 1; foo.b = foo.a == 1; try testFoo(foo); @@ -479,14 +479,14 @@ test "runtime struct initialization of bitfield" { .y = x1, }; const s2 = Nibbles{ - .x = @intCast(u4, x2), - .y = @intCast(u4, x2), + .x = @as(u4, @intCast(x2)), + .y = @as(u4, @intCast(x2)), }; try expect(s1.x == x1); try expect(s1.y == x1); - try expect(s2.x == @intCast(u4, x2)); - try expect(s2.y == @intCast(u4, x2)); + try expect(s2.x == @as(u4, @intCast(x2))); + try expect(s2.y == @as(u4, @intCast(x2))); } var x1 = @as(u4, 1); @@ -515,8 +515,8 @@ test "packed struct fields are ordered from LSB to MSB" { var all: u64 = 0x7765443322221111; var bytes: [8]u8 
align(@alignOf(Bitfields)) = undefined; - @memcpy(bytes[0..8], @ptrCast([*]u8, &all)); - var bitfields = @ptrCast(*Bitfields, &bytes).*; + @memcpy(bytes[0..8], @as([*]u8, @ptrCast(&all))); + var bitfields = @as(*Bitfields, @ptrCast(&bytes)).*; try expect(bitfields.f1 == 0x1111); try expect(bitfields.f2 == 0x2222); @@ -1281,7 +1281,7 @@ test "packed struct aggregate init" { const S = struct { fn foo(a: i2, b: i6) u8 { - return @bitCast(u8, P{ .a = a, .b = b }); + return @as(u8, @bitCast(P{ .a = a, .b = b })); } const P = packed struct { @@ -1289,7 +1289,7 @@ test "packed struct aggregate init" { b: i6, }; }; - const result = @bitCast(u8, S.foo(1, 2)); + const result = @as(u8, @bitCast(S.foo(1, 2))); try expect(result == 9); } @@ -1365,7 +1365,7 @@ test "under-aligned struct field" { }; var runtime: usize = 1234; const ptr = &S{ .events = 0, .data = .{ .u64 = runtime } }; - const array = @ptrCast(*const [12]u8, ptr); + const array = @as(*const [12]u8, @ptrCast(ptr)); const result = std.mem.readIntNative(u64, array[4..12]); try expect(result == 1234); } diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index bcbfc81ed488..0ae7c510ef0e 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -590,9 +590,9 @@ test "switch on pointer type" { field: u32, }; - const P1 = @ptrFromInt(*X, 0x400); - const P2 = @ptrFromInt(*X, 0x800); - const P3 = @ptrFromInt(*X, 0xC00); + const P1 = @as(*X, @ptrFromInt(0x400)); + const P2 = @as(*X, @ptrFromInt(0x800)); + const P3 = @as(*X, @ptrFromInt(0xC00)); fn doTheTest(arg: *X) i32 { switch (arg) { @@ -682,9 +682,9 @@ test "enum value without tag name used as switch item" { b = 2, _, }; - var e: E = @enumFromInt(E, 0); + var e: E = @as(E, @enumFromInt(0)); switch (e) { - @enumFromInt(E, 0) => {}, + @as(E, @enumFromInt(0)) => {}, .a => return error.TestFailed, .b => return error.TestFailed, _ => return error.TestFailed, diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig 
index a69396c2032a..68e91bfa5819 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -60,7 +60,7 @@ test "cast negative integer to pointer" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - try expectEqual(@ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))), h.MAP_FAILED); + try expectEqual(@as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))), h.MAP_FAILED); } test "casting to union with a macro" { @@ -89,7 +89,7 @@ test "casting or calling a value with a paren-surrounded macro" { const l: c_long = 42; const casted = h.CAST_OR_CALL_WITH_PARENS(c_int, l); - try expect(casted == @intCast(c_int, l)); + try expect(casted == @as(c_int, @intCast(l))); const Helper = struct { fn foo(n: c_int) !void { diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig index 3ea979009e1a..4fc095b66c42 100644 --- a/test/behavior/truncate.zig +++ b/test/behavior/truncate.zig @@ -4,58 +4,58 @@ const expect = std.testing.expect; test "truncate u0 to larger integer allowed and has comptime-known result" { var x: u0 = 0; - const y = @truncate(u8, x); + const y = @as(u8, @truncate(x)); try comptime expect(y == 0); } test "truncate.u0.literal" { - var z = @truncate(u0, 0); + var z = @as(u0, @truncate(0)); try expect(z == 0); } test "truncate.u0.const" { const c0: usize = 0; - var z = @truncate(u0, c0); + var z = @as(u0, @truncate(c0)); try expect(z == 0); } test "truncate.u0.var" { var d: u8 = 2; - var z = @truncate(u0, d); + var z = @as(u0, @truncate(d)); try expect(z == 0); } test "truncate i0 to larger integer allowed and has comptime-known result" { var x: i0 = 0; - const y = @truncate(i8, x); + const y = @as(i8, @truncate(x)); try comptime expect(y == 0); } test "truncate.i0.literal" { - var z = @truncate(i0, 0); + var z = @as(i0, @truncate(0)); try expect(z == 0); } test "truncate.i0.const" { const c0: isize = 
0; - var z = @truncate(i0, c0); + var z = @as(i0, @truncate(c0)); try expect(z == 0); } test "truncate.i0.var" { var d: i8 = 2; - var z = @truncate(i0, d); + var z = @as(i0, @truncate(d)); try expect(z == 0); } test "truncate on comptime integer" { - var x = @truncate(u16, 9999); + var x = @as(u16, @truncate(9999)); try expect(x == 9999); - var y = @truncate(u16, -21555); + var y = @as(u16, @truncate(-21555)); try expect(y == 0xabcd); - var z = @truncate(i16, -65537); + var z = @as(i16, @truncate(-65537)); try expect(z == -1); - var w = @truncate(u1, 1 << 100); + var w = @as(u1, @truncate(1 << 100)); try expect(w == 0); } @@ -69,7 +69,7 @@ test "truncate on vectors" { const S = struct { fn doTheTest() !void { var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 }; - var v2 = @truncate(u8, v1); + var v2: @Vector(4, u8) = @truncate(v1); try expect(std.mem.eql(u8, &@as([4]u8, v2), &[4]u8{ 0xbb, 0xdd, 0xff, 0x22 })); } }; diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index ee414365c3e9..e9d3fcd0aae2 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -403,7 +403,7 @@ test "nested runtime conditionals in tuple initializer" { var data: u8 = 0; const x = .{ - if (data != 0) "" else switch (@truncate(u1, data)) { + if (data != 0) "" else switch (@as(u1, @truncate(data))) { 0 => "up", 1 => "down", }, diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig index c053447ccc93..84b04d3e5391 100644 --- a/test/behavior/tuple_declarations.zig +++ b/test/behavior/tuple_declarations.zig @@ -21,7 +21,7 @@ test "tuple declaration type info" { try expectEqualStrings(info.fields[0].name, "0"); try expect(info.fields[0].type == u32); - try expect(@ptrCast(*const u32, @alignCast(@alignOf(u32), info.fields[0].default_value)).* == 1); + try expect(@as(*const u32, @ptrCast(@alignCast(info.fields[0].default_value))).* == 1); try expect(info.fields[0].is_comptime); try expect(info.fields[0].alignment == 2); diff --git 
a/test/behavior/type.zig b/test/behavior/type.zig index 9420b5d2fdb8..a2ede838b263 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -289,7 +289,7 @@ test "Type.Struct" { try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value); try testing.expectEqualSlices(u8, "y", infoB.fields[1].name); try testing.expectEqual(u32, infoB.fields[1].type); - try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoB.fields[1].default_value.?).*); + try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoB.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoB.decls.len); try testing.expectEqual(@as(bool, false), infoB.is_tuple); @@ -298,10 +298,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Packed, infoC.layout); try testing.expectEqualSlices(u8, "x", infoC.fields[0].name); try testing.expectEqual(u8, infoC.fields[0].type); - try testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*); + try testing.expectEqual(@as(u8, 3), @as(*const u8, @ptrCast(infoC.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "y", infoC.fields[1].name); try testing.expectEqual(u32, infoC.fields[1].type); - try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoC.fields[1].default_value.?).*); + try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoC.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoC.decls.len); try testing.expectEqual(@as(bool, false), infoC.is_tuple); @@ -311,10 +311,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Auto, infoD.layout); try testing.expectEqualSlices(u8, "x", infoD.fields[0].name); try testing.expectEqual(comptime_int, infoD.fields[0].type); - try testing.expectEqual(@as(comptime_int, 3), @ptrCast(*const comptime_int, infoD.fields[0].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 3), @as(*const 
comptime_int, @ptrCast(infoD.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "y", infoD.fields[1].name); try testing.expectEqual(comptime_int, infoD.fields[1].type); - try testing.expectEqual(@as(comptime_int, 5), @ptrCast(*const comptime_int, infoD.fields[1].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 5), @as(*const comptime_int, @ptrCast(infoD.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoD.decls.len); try testing.expectEqual(@as(bool, false), infoD.is_tuple); @@ -324,10 +324,10 @@ test "Type.Struct" { try testing.expectEqual(Type.ContainerLayout.Auto, infoE.layout); try testing.expectEqualSlices(u8, "0", infoE.fields[0].name); try testing.expectEqual(comptime_int, infoE.fields[0].type); - try testing.expectEqual(@as(comptime_int, 1), @ptrCast(*const comptime_int, infoE.fields[0].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 1), @as(*const comptime_int, @ptrCast(infoE.fields[0].default_value.?)).*); try testing.expectEqualSlices(u8, "1", infoE.fields[1].name); try testing.expectEqual(comptime_int, infoE.fields[1].type); - try testing.expectEqual(@as(comptime_int, 2), @ptrCast(*const comptime_int, infoE.fields[1].default_value.?).*); + try testing.expectEqual(@as(comptime_int, 2), @as(*const comptime_int, @ptrCast(infoE.fields[1].default_value.?)).*); try testing.expectEqual(@as(usize, 0), infoE.decls.len); try testing.expectEqual(@as(bool, true), infoE.is_tuple); @@ -379,7 +379,7 @@ test "Type.Enum" { try testing.expectEqual(false, @typeInfo(Bar).Enum.is_exhaustive); try testing.expectEqual(@as(u32, 1), @intFromEnum(Bar.a)); try testing.expectEqual(@as(u32, 5), @intFromEnum(Bar.b)); - try testing.expectEqual(@as(u32, 6), @intFromEnum(@enumFromInt(Bar, 6))); + try testing.expectEqual(@as(u32, 6), @intFromEnum(@as(Bar, @enumFromInt(6)))); } test "Type.Union" { diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index 87ae96768aa4..0d026c0078d1 100644 --- 
a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -113,7 +113,7 @@ fn testNullTerminatedPtr() !void { try expect(ptr_info.Pointer.size == .Many); try expect(ptr_info.Pointer.is_const == false); try expect(ptr_info.Pointer.is_volatile == false); - try expect(@ptrCast(*const u8, ptr_info.Pointer.sentinel.?).* == 0); + try expect(@as(*const u8, @ptrCast(ptr_info.Pointer.sentinel.?)).* == 0); try expect(@typeInfo([:0]u8).Pointer.sentinel != null); } @@ -151,7 +151,7 @@ fn testArray() !void { const info = @typeInfo([10:0]u8); try expect(info.Array.len == 10); try expect(info.Array.child == u8); - try expect(@ptrCast(*const u8, info.Array.sentinel.?).* == @as(u8, 0)); + try expect(@as(*const u8, @ptrCast(info.Array.sentinel.?)).* == @as(u8, 0)); try expect(@sizeOf([10:0]u8) == info.Array.len + 1); } } @@ -295,8 +295,8 @@ fn testStruct() !void { try expect(unpacked_struct_info.Struct.is_tuple == false); try expect(unpacked_struct_info.Struct.backing_integer == null); try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32)); - try expect(@ptrCast(*align(1) const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4); - try expect(mem.eql(u8, "foobar", @ptrCast(*align(1) const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*)); + try expect(@as(*align(1) const u32, @ptrCast(unpacked_struct_info.Struct.fields[0].default_value.?)).* == 4); + try expect(mem.eql(u8, "foobar", @as(*align(1) const *const [6:0]u8, @ptrCast(unpacked_struct_info.Struct.fields[1].default_value.?)).*)); } const TestStruct = struct { @@ -319,7 +319,7 @@ fn testPackedStruct() !void { try expect(struct_info.Struct.fields[0].alignment == 0); try expect(struct_info.Struct.fields[2].type == f32); try expect(struct_info.Struct.fields[2].default_value == null); - try expect(@ptrCast(*align(1) const u32, struct_info.Struct.fields[3].default_value.?).* == 4); + try expect(@as(*align(1) const u32, 
@ptrCast(struct_info.Struct.fields[3].default_value.?)).* == 4); try expect(struct_info.Struct.fields[3].alignment == 0); try expect(struct_info.Struct.decls.len == 2); try expect(struct_info.Struct.decls[0].is_pub); @@ -504,7 +504,7 @@ test "type info for async frames" { switch (@typeInfo(@Frame(add))) { .Frame => |frame| { - try expect(@ptrCast(@TypeOf(add), frame.function) == add); + try expect(@as(@TypeOf(add), @ptrCast(frame.function)) == add); }, else => unreachable, } @@ -564,7 +564,7 @@ test "typeInfo resolves usingnamespace declarations" { test "value from struct @typeInfo default_value can be loaded at comptime" { comptime { const a = @typeInfo(@TypeOf(.{ .foo = @as(u8, 1) })).Struct.fields[0].default_value; - try expect(@ptrCast(*const u8, a).* == 1); + try expect(@as(*const u8, @ptrCast(a)).* == 1); } } @@ -607,6 +607,6 @@ test "@typeInfo decls ignore dependency loops" { test "type info of tuple of string literal default value" { const struct_field = @typeInfo(@TypeOf(.{"hi"})).Struct.fields[0]; - const value = @ptrCast(*align(1) const *const [2:0]u8, struct_field.default_value.?).*; + const value = @as(*align(1) const *const [2:0]u8, @ptrCast(struct_field.default_value.?)).*; comptime std.debug.assert(value[0] == 'h'); } diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 47864a83c93e..2aab98ea7213 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1244,7 +1244,7 @@ test "@intCast to u0" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; var zeros = @Vector(2, u32){ 0, 0 }; - const casted = @intCast(@Vector(2, u0), zeros); + const casted = @as(@Vector(2, u0), @intCast(zeros)); _ = casted[0]; } diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig index f06d45506099..d421f0aacefe 100644 --- a/test/c_abi/main.zig +++ b/test/c_abi/main.zig @@ -143,7 +143,7 @@ export fn zig_longdouble(x: c_longdouble) void { extern fn c_ptr(*anyopaque) void; test "C ABI pointer" { - 
c_ptr(@ptrFromInt(*anyopaque, 0xdeadbeef)); + c_ptr(@as(*anyopaque, @ptrFromInt(0xdeadbeef))); } export fn zig_ptr(x: *anyopaque) void { @@ -1058,14 +1058,14 @@ test "C function that takes byval struct called via function pointer" { var fn_ptr = &c_func_ptr_byval; fn_ptr( - @ptrFromInt(*anyopaque, 1), - @ptrFromInt(*anyopaque, 2), + @as(*anyopaque, @ptrFromInt(1)), + @as(*anyopaque, @ptrFromInt(2)), ByVal{ .origin = .{ .x = 9, .y = 10, .z = 11 }, .size = .{ .width = 12, .height = 13, .depth = 14 }, }, @as(c_ulong, 3), - @ptrFromInt(*anyopaque, 4), + @as(*anyopaque, @ptrFromInt(4)), @as(c_ulong, 5), ); } @@ -1098,7 +1098,7 @@ test "f80 bare" { if (!has_f80) return error.SkipZigTest; const a = c_f80(12.34); - try expect(@floatCast(f64, a) == 56.78); + try expect(@as(f64, @floatCast(a)) == 56.78); } const f80_struct = extern struct { @@ -1111,7 +1111,7 @@ test "f80 struct" { if (builtin.mode != .Debug) return error.SkipZigTest; const a = c_f80_struct(.{ .a = 12.34 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); } const f80_extra_struct = extern struct { @@ -1124,7 +1124,7 @@ test "f80 extra struct" { if (builtin.target.cpu.arch == .x86) return error.SkipZigTest; const a = c_f80_extra_struct(.{ .a = 12.34, .b = 42 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); try expect(a.b == 24); } @@ -1133,7 +1133,7 @@ test "f128 bare" { if (!has_f128) return error.SkipZigTest; const a = c_f128(12.34); - try expect(@floatCast(f64, a) == 56.78); + try expect(@as(f64, @floatCast(a)) == 56.78); } const f128_struct = extern struct { @@ -1144,7 +1144,7 @@ test "f128 struct" { if (!has_f128) return error.SkipZigTest; const a = c_f128_struct(.{ .a = 12.34 }); - try expect(@floatCast(f64, a.a) == 56.78); + try expect(@as(f64, @floatCast(a.a)) == 56.78); } // The stdcall attribute on C functions is ignored when compiled on non-x86 diff --git 
a/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig b/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig index 1b8e3767b285..25345aced0d5 100644 --- a/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig +++ b/test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig @@ -1,9 +1,10 @@ export fn entry() void { - @alignCast(4, @as(u32, 3)); + const x: *align(8) u32 = @alignCast(@as(u32, 3)); + _ = x; } // error // backend=stage2 // target=native // -// :2:19: error: expected pointer type, found 'u32' +// :2:41: error: expected pointer type, found 'u32' diff --git a/test/cases/compile_errors/bad_alignCast_at_comptime.zig b/test/cases/compile_errors/bad_alignCast_at_comptime.zig index 885700ecac8e..c87052182200 100644 --- a/test/cases/compile_errors/bad_alignCast_at_comptime.zig +++ b/test/cases/compile_errors/bad_alignCast_at_comptime.zig @@ -1,6 +1,6 @@ comptime { - const ptr = @ptrFromInt(*align(1) i32, 0x1); - const aligned = @alignCast(4, ptr); + const ptr: *align(1) i32 = @ptrFromInt(0x1); + const aligned: *align(4) i32 = @alignCast(ptr); _ = aligned; } @@ -8,4 +8,4 @@ comptime { // backend=stage2 // target=native // -// :3:35: error: pointer address 0x1 is not aligned to 4 bytes +// :3:47: error: pointer address 0x1 is not aligned to 4 bytes diff --git a/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig b/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig index 2f7bd9c9bc91..e366e0cb03f5 100644 --- a/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig +++ b/test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig @@ -1,5 +1,5 @@ export fn entry(byte: u8) void { - var oops = @bitCast(u7, byte); + var oops: u7 = @bitCast(byte); _ = oops; } @@ -7,4 +7,4 @@ export fn entry(byte: u8) void { // backend=stage2 // target=native // -// :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits +// :2:20: 
error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits diff --git a/test/cases/compile_errors/bitCast_to_enum_type.zig b/test/cases/compile_errors/bitCast_to_enum_type.zig index b3bc72c21b96..7f3711b7f11c 100644 --- a/test/cases/compile_errors/bitCast_to_enum_type.zig +++ b/test/cases/compile_errors/bitCast_to_enum_type.zig @@ -1,6 +1,6 @@ export fn entry() void { const E = enum(u32) { a, b }; - const y = @bitCast(E, @as(u32, 3)); + const y: E = @bitCast(@as(u32, 3)); _ = y; } @@ -8,5 +8,5 @@ export fn entry() void { // backend=stage2 // target=native // -// :3:24: error: cannot @bitCast to 'tmp.entry.E' -// :3:24: note: use @enumFromInt to cast from 'u32' +// :3:18: error: cannot @bitCast to 'tmp.entry.E' +// :3:18: note: use @enumFromInt to cast from 'u32' diff --git a/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig b/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig index bf87ba8bc51f..f73dfeb38a46 100644 --- a/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig +++ b/test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig @@ -1,5 +1,5 @@ export fn entry() void { - var foo = (@bitCast(u8, @as(f32, 1.0)) == 0xf); + var foo = (@as(u8, @bitCast(@as(f32, 1.0))) == 0xf); _ = foo; } @@ -7,4 +7,4 @@ export fn entry() void { // backend=stage2 // target=native // -// :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits +// :2:24: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits diff --git a/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig b/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig index ebd9012015c4..57206b267fc1 100644 --- a/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig +++ b/test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig 
@@ -1,6 +1,6 @@ comptime { const value: i32 = -1; - const unsigned = @intCast(u32, value); + const unsigned: u32 = @intCast(value); _ = unsigned; } export fn entry1() void { diff --git a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig index 73de52fc97f1..4f79da9fb1af 100644 --- a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig +++ b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig @@ -1,5 +1,5 @@ export fn entry() void { - @compileLog(@as(*align(1) const anyopaque, @ptrCast(*const anyopaque, &entry))); + @compileLog(@as(*const anyopaque, @ptrCast(&entry))); } // error diff --git a/test/cases/compile_errors/compile_time_null_ptr_cast.zig b/test/cases/compile_errors/compile_time_null_ptr_cast.zig index 25805e9f3521..7d25931aaaad 100644 --- a/test/cases/compile_errors/compile_time_null_ptr_cast.zig +++ b/test/cases/compile_errors/compile_time_null_ptr_cast.zig @@ -1,6 +1,6 @@ comptime { var opt_ptr: ?*i32 = null; - const ptr = @ptrCast(*i32, opt_ptr); + const ptr: *i32 = @ptrCast(opt_ptr); _ = ptr; } diff --git a/test/cases/compile_errors/compile_time_undef_ptr_cast.zig b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig index 14edd293de28..d93e8bc73d1b 100644 --- a/test/cases/compile_errors/compile_time_undef_ptr_cast.zig +++ b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig @@ -1,6 +1,6 @@ comptime { var undef_ptr: *i32 = undefined; - const ptr = @ptrCast(*i32, undef_ptr); + const ptr: *i32 = @ptrCast(undef_ptr); _ = ptr; } diff --git a/test/cases/compile_errors/comptime_call_of_function_pointer.zig b/test/cases/compile_errors/comptime_call_of_function_pointer.zig index d6598aab3929..574f55e9f3d5 100644 --- a/test/cases/compile_errors/comptime_call_of_function_pointer.zig +++ b/test/cases/compile_errors/comptime_call_of_function_pointer.zig @@ -1,5 +1,5 @@ export fn entry() void { - const fn_ptr = 
@ptrFromInt(*align(1) fn () void, 0xffd2); + const fn_ptr: *align(1) fn () void = @ptrFromInt(0xffd2); comptime fn_ptr(); } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig index ffa21af10aea..83c48e8acd57 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig index c5bb2d9643c4..c111b026a501 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var 
target: [*]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..3 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig index aa52fb975689..24aa36949b46 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..14 :255]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..14 :255]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig index 86bd4ce8bb7a..249d59414afd 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { 
comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..15 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..15 :0]; _ = slice; } diff --git a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig index e1b8a5bc2deb..a6e599ca3872 100644 --- a/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig +++ b/test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig @@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void { export fn foo_vector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*]u8 = @ptrCast([*]u8, &buf); + var target: [*]u8 = @ptrCast(&buf); const slice = target[0..14 :0]; _ = slice; } @@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void { export fn foo_cvector_ConstPtrSpecialRef() void { comptime { var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10; - var target: [*c]u8 = @ptrCast([*c]u8, &buf); + var target: [*c]u8 = @ptrCast(&buf); const slice = target[0..14 :0]; _ = slice; } diff --git a/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig b/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig index dfef66b6284b..112017d29d88 100644 --- a/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig +++ 
b/test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig @@ -1,11 +1,11 @@ pub export fn entry() void { const E = enum(u3) { a, b, c, _ }; - @compileLog(@enumFromInt(E, 100)); + @compileLog(@as(E, @enumFromInt(100))); } // error // target=native // backend=stage2 // -// :3:17: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E' +// :3:24: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E' // :2:15: note: enum declared here diff --git a/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig b/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig index 0cf9fcce0112..3e1190cc32d2 100644 --- a/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig +++ b/test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig @@ -3,7 +3,7 @@ const Foo = enum(u32) { B = 11, }; export fn entry() void { - var x = @enumFromInt(Foo, 0); + var x: Foo = @enumFromInt(0); _ = x; } @@ -11,5 +11,5 @@ export fn entry() void { // backend=stage2 // target=native // -// :6:13: error: enum 'tmp.Foo' has no tag with value '0' +// :6:18: error: enum 'tmp.Foo' has no tag with value '0' // :1:13: note: enum declared here diff --git a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig index a3af883198fd..cfb01c3ddca2 100644 --- a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig +++ b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig @@ -2,7 +2,7 @@ const Set1 = error{ A, B }; const Set2 = error{ A, C }; comptime { var x = Set1.B; - var y = @errSetCast(Set2, x); + var y: Set2 = @errSetCast(x); _ = y; } @@ -10,4 +10,4 @@ comptime { // backend=stage2 // target=native // -// :5:13: error: 'error.B' not a member of error set 'error{C,A}' 
+// :5:19: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig b/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig index 6ae39489a0d8..bb920138e1b3 100644 --- a/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig +++ b/test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig @@ -7,7 +7,7 @@ const Small = enum(u2) { export fn entry() void { var y = @as(f32, 3); - var x = @enumFromInt(Small, y); + var x: Small = @enumFromInt(y); _ = x; } diff --git a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig index 9fc8038d7a35..2147fb8aed6b 100644 --- a/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig +++ b/test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig @@ -8,7 +8,7 @@ const foo = Foo{ }; comptime { - const field_ptr = @ptrFromInt(*i32, 0x1234); + const field_ptr: *i32 = @ptrFromInt(0x1234); const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr); _ = another_foo_ptr; } diff --git a/test/cases/compile_errors/field_access_of_opaque_type.zig b/test/cases/compile_errors/field_access_of_opaque_type.zig index f9ec483305cf..7f975c4b0a97 100644 --- a/test/cases/compile_errors/field_access_of_opaque_type.zig +++ b/test/cases/compile_errors/field_access_of_opaque_type.zig @@ -2,7 +2,7 @@ const MyType = opaque {}; export fn entry() bool { var x: i32 = 1; - return bar(@ptrCast(*MyType, &x)); + return bar(@ptrCast(&x)); } fn bar(x: *MyType) bool { diff --git a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig index 44405b3c206a..55af9c1185fd 100644 --- a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig +++ b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig @@ 
-2,7 +2,7 @@ pub export fn entry() void { var buf: [5]u8 = .{ 1, 2, 3, 4, 5 }; var slice: []u8 = &buf; const a: u32 = 1234; - @memcpy(slice.ptr, @ptrCast([*]const u8, &a)); + @memcpy(slice.ptr, @as([*]const u8, @ptrCast(&a))); } pub export fn entry1() void { var buf: [5]u8 = .{ 1, 2, 3, 4, 5 }; @@ -39,7 +39,7 @@ pub export fn memset_array() void { // // :5:5: error: unknown @memcpy length // :5:18: note: destination type '[*]u8' provides no length -// :5:24: note: source type '[*]align(4) const u8' provides no length +// :5:24: note: source type '[*]const u8' provides no length // :10:13: error: type '*u8' is not an indexable pointer // :10:13: note: operand must be a slice, a many pointer or a pointer to an array // :15:13: error: type '*u8' is not an indexable pointer diff --git a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig index 8d7e14acae89..22bd90b0684c 100644 --- a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig +++ b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig @@ -1,6 +1,6 @@ export fn entry() u32 { var bytes: [4]u8 = [_]u8{ 0x01, 0x02, 0x03, 0x04 }; - const ptr = @ptrCast(*u32, &bytes[0]); + const ptr: *u32 = @ptrCast(&bytes[0]); return ptr.*; } @@ -8,7 +8,7 @@ export fn entry() u32 { // backend=stage2 // target=native // -// :3:17: error: cast increases pointer alignment +// :3:23: error: cast increases pointer alignment // :3:32: note: '*u8' has alignment '1' -// :3:26: note: '*u32' has alignment '4' -// :3:17: note: consider using '@alignCast' +// :3:23: note: '*u32' has alignment '4' +// :3:23: note: use @alignCast to assert pointer alignment diff --git a/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig b/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig index ecf8f61fc524..772463206992 100644 --- a/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig 
+++ b/test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig @@ -1,17 +1,17 @@ export fn foo() void { var a: f32 = 2; - _ = @intFromFloat(comptime_int, a); + _ = @as(comptime_int, @intFromFloat(a)); } export fn bar() void { var a: u32 = 2; - _ = @floatFromInt(comptime_float, a); + _ = @as(comptime_float, @floatFromInt(a)); } // error // backend=stage2 // target=native // -// :3:37: error: unable to resolve comptime value -// :3:37: note: value being casted to 'comptime_int' must be comptime-known -// :7:39: error: unable to resolve comptime value -// :7:39: note: value being casted to 'comptime_float' must be comptime-known +// :3:41: error: unable to resolve comptime value +// :3:41: note: value being casted to 'comptime_int' must be comptime-known +// :7:43: error: unable to resolve comptime value +// :7:43: note: value being casted to 'comptime_float' must be comptime-known diff --git a/test/cases/compile_errors/intFromFloat_comptime_safety.zig b/test/cases/compile_errors/intFromFloat_comptime_safety.zig index 275f67006fb6..e3bfc3eb963e 100644 --- a/test/cases/compile_errors/intFromFloat_comptime_safety.zig +++ b/test/cases/compile_errors/intFromFloat_comptime_safety.zig @@ -1,17 +1,17 @@ comptime { - _ = @intFromFloat(i8, @as(f32, -129.1)); + _ = @as(i8, @intFromFloat(@as(f32, -129.1))); } comptime { - _ = @intFromFloat(u8, @as(f32, -1.1)); + _ = @as(u8, @intFromFloat(@as(f32, -1.1))); } comptime { - _ = @intFromFloat(u8, @as(f32, 256.1)); + _ = @as(u8, @intFromFloat(@as(f32, 256.1))); } // error // backend=stage2 // target=native // -// :2:27: error: float value '-129.10000610351562' cannot be stored in integer type 'i8' -// :5:27: error: float value '-1.100000023841858' cannot be stored in integer type 'u8' -// :8:27: error: float value '256.1000061035156' cannot be stored in integer type 'u8' +// :2:31: error: float value '-129.10000610351562' cannot be stored in integer type 'i8' +// :5:31: error: float value '-1.100000023841858' cannot be 
stored in integer type 'u8' +// :8:31: error: float value '256.1000061035156' cannot be stored in integer type 'u8' diff --git a/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig b/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig index 4a2ea05eaaf2..e443b3daa962 100644 --- a/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig +++ b/test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig @@ -1,5 +1,5 @@ export fn entry() void { - var b = @ptrFromInt(*i32, 0); + var b: *i32 = @ptrFromInt(0); _ = b; } diff --git a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig index 6a1f2db531e9..32f4657ed549 100644 --- a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig +++ b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig @@ -8,7 +8,7 @@ const Set2 = error{ }; comptime { var x = @intFromError(Set1.B); - var y = @errSetCast(Set2, @errorFromInt(x)); + var y: Set2 = @errSetCast(@errorFromInt(x)); _ = y; } @@ -16,4 +16,4 @@ comptime { // backend=llvm // target=native // -// :11:13: error: 'error.B' not a member of error set 'error{C,A}' +// :11:19: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/integer_cast_truncates_bits.zig b/test/cases/compile_errors/integer_cast_truncates_bits.zig index 82eb6b61cf4e..a230dd3e5bae 100644 --- a/test/cases/compile_errors/integer_cast_truncates_bits.zig +++ b/test/cases/compile_errors/integer_cast_truncates_bits.zig @@ -1,6 +1,6 @@ export fn entry1() void { const spartan_count: u16 = 300; - const byte = @intCast(u8, spartan_count); + const byte: u8 = @intCast(spartan_count); _ = byte; } export fn entry2() void { diff --git a/test/cases/compile_errors/integer_underflow_error.zig b/test/cases/compile_errors/integer_underflow_error.zig index 275b593ecc18..49f46ee55803 100644 --- 
a/test/cases/compile_errors/integer_underflow_error.zig +++ b/test/cases/compile_errors/integer_underflow_error.zig @@ -1,9 +1,9 @@ export fn entry() void { - _ = @ptrFromInt(*anyopaque, ~@as(usize, @import("std").math.maxInt(usize)) - 1); + _ = @as(*anyopaque, @ptrFromInt(~@as(usize, @import("std").math.maxInt(usize)) - 1)); } // error // backend=stage2 // target=native // -// :2:80: error: overflow of integer type 'usize' with value '-1' +// :2:84: error: overflow of integer type 'usize' with value '-1' diff --git a/test/cases/compile_errors/invalid_float_casts.zig b/test/cases/compile_errors/invalid_float_casts.zig index 507ced1e57b4..789eb1097607 100644 --- a/test/cases/compile_errors/invalid_float_casts.zig +++ b/test/cases/compile_errors/invalid_float_casts.zig @@ -1,25 +1,25 @@ export fn foo() void { var a: f32 = 2; - _ = @floatCast(comptime_float, a); + _ = @as(comptime_float, @floatCast(a)); } export fn bar() void { var a: f32 = 2; - _ = @intFromFloat(f32, a); + _ = @as(f32, @intFromFloat(a)); } export fn baz() void { var a: f32 = 2; - _ = @floatFromInt(f32, a); + _ = @as(f32, @floatFromInt(a)); } export fn qux() void { var a: u32 = 2; - _ = @floatCast(f32, a); + _ = @as(f32, @floatCast(a)); } // error // backend=stage2 // target=native // -// :3:36: error: unable to cast runtime value to 'comptime_float' -// :7:23: error: expected integer type, found 'f32' -// :11:28: error: expected integer type, found 'f32' -// :15:25: error: expected float type, found 'u32' +// :3:40: error: unable to cast runtime value to 'comptime_float' +// :7:18: error: expected integer type, found 'f32' +// :11:32: error: expected integer type, found 'f32' +// :15:29: error: expected float type, found 'u32' diff --git a/test/cases/compile_errors/invalid_int_casts.zig b/test/cases/compile_errors/invalid_int_casts.zig index 262a096bd9f7..1e52c5260956 100644 --- a/test/cases/compile_errors/invalid_int_casts.zig +++ b/test/cases/compile_errors/invalid_int_casts.zig @@ -1,25 +1,25 @@ 
export fn foo() void { var a: u32 = 2; - _ = @intCast(comptime_int, a); + _ = @as(comptime_int, @intCast(a)); } export fn bar() void { var a: u32 = 2; - _ = @floatFromInt(u32, a); + _ = @as(u32, @floatFromInt(a)); } export fn baz() void { var a: u32 = 2; - _ = @intFromFloat(u32, a); + _ = @as(u32, @intFromFloat(a)); } export fn qux() void { var a: f32 = 2; - _ = @intCast(u32, a); + _ = @as(u32, @intCast(a)); } // error // backend=stage2 // target=native // -// :3:32: error: unable to cast runtime value to 'comptime_int' -// :7:23: error: expected float type, found 'u32' -// :11:28: error: expected float type, found 'u32' -// :15:23: error: expected integer or vector, found 'f32' +// :3:36: error: unable to cast runtime value to 'comptime_int' +// :7:18: error: expected float type, found 'u32' +// :11:32: error: expected float type, found 'u32' +// :15:27: error: expected integer or vector, found 'f32' diff --git a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig index 5457a61d3fd0..d7a93edfcda4 100644 --- a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig +++ b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig @@ -8,12 +8,12 @@ const U = union(E) { b, }; export fn foo() void { - var e = @enumFromInt(E, 15); + var e: E = @enumFromInt(15); var u: U = e; _ = u; } export fn bar() void { - const e = @enumFromInt(E, 15); + const e: E = @enumFromInt(15); var u: U = e; _ = u; } @@ -24,5 +24,5 @@ export fn bar() void { // // :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum // :1:11: note: enum declared here -// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(tmp.E, 15)' +// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(15)' // :6:11: note: union declared here diff --git a/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig 
b/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig index 7a4c0eb7e858..c6566bb46a0c 100644 --- a/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig +++ b/test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig @@ -1,11 +1,11 @@ export fn foo1() void { var bytes = [_]u8{ 1, 2 }; - const word: u16 = @bitCast(u16, bytes[0..]); + const word: u16 = @bitCast(bytes[0..]); _ = word; } export fn foo2() void { var bytes: []const u8 = &[_]u8{ 1, 2 }; - const word: u16 = @bitCast(u16, bytes); + const word: u16 = @bitCast(bytes); _ = word; } @@ -13,7 +13,7 @@ export fn foo2() void { // backend=stage2 // target=native // -// :3:42: error: cannot @bitCast from '*[2]u8' -// :3:42: note: use @intFromPtr to cast to 'u16' -// :8:37: error: cannot @bitCast from '[]const u8' -// :8:37: note: use @intFromPtr to cast to 'u16' +// :3:37: error: cannot @bitCast from '*[2]u8' +// :3:37: note: use @intFromPtr to cast to 'u16' +// :8:32: error: cannot @bitCast from '[]const u8' +// :8:32: note: use @intFromPtr to cast to 'u16' diff --git a/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig b/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig index baeb3e8c8274..e4952e695132 100644 --- a/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig +++ b/test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig @@ -1,7 +1,7 @@ export fn entry() void { const float: f32 align(@alignOf(i64)) = 5.99999999999994648725e-01; const float_ptr = &float; - const int_ptr = @ptrCast(*const i64, float_ptr); + const int_ptr: *const i64 = @ptrCast(float_ptr); const int_val = int_ptr.*; _ = int_val; } diff --git a/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig b/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig index 0bada117b249..cdbebf54577e 100644 --- 
a/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig +++ b/test/cases/compile_errors/missing_builtin_arg_in_initializer.zig @@ -1,8 +1,11 @@ comptime { - const v = @as(); + const a = @as(); } comptime { - const u = @bitCast(u32); + const b = @bitCast(); +} +comptime { + const c = @as(u32); } // error @@ -10,4 +13,5 @@ comptime { // target=native // // :2:15: error: expected 2 arguments, found 0 -// :5:15: error: expected 2 arguments, found 1 +// :5:15: error: expected 1 argument, found 0 +// :8:15: error: expected 2 arguments, found 1 diff --git a/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig b/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig index fac51c59c8df..ee0b5e733e7e 100644 --- a/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig +++ b/test/cases/compile_errors/non_float_passed_to_intFromFloat.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @intFromFloat(i32, @as(i32, 54)); + const x: i32 = @intFromFloat(@as(i32, 54)); _ = x; } diff --git a/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig b/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig index 63e6753a53fd..c60842e98003 100644 --- a/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig +++ b/test/cases/compile_errors/non_int_passed_to_floatFromInt.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @floatFromInt(f32, 1.1); + const x: f32 = @floatFromInt(1.1); _ = x; } diff --git a/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig b/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig index 574ffc5a20ea..d9cfd4b2de17 100644 --- a/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig +++ b/test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig @@ -1,5 +1,5 @@ export fn entry() void { - const x = @intFromFloat(i8, 200); + const x: i8 = @intFromFloat(200); _ = x; } diff --git 
a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig index f27f5f4f93a8..a704ea456b41 100644 --- a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig +++ b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig @@ -1,6 +1,6 @@ export fn entry() void { const x: i32 = 1234; - const y = @ptrCast(*i32, &x); + const y: *i32 = @ptrCast(&x); _ = y; } @@ -8,5 +8,5 @@ export fn entry() void { // backend=stage2 // target=native // -// :3:15: error: cast discards const qualifier -// :3:15: note: consider using '@constCast' +// :3:21: error: cast discards const qualifier +// :3:21: note: use @constCast to discard const qualifier diff --git a/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig b/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig index f472789affcf..c75ceb444ba8 100644 --- a/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig +++ b/test/cases/compile_errors/ptrFromInt_non_ptr_type.zig @@ -1,15 +1,15 @@ pub export fn entry() void { - _ = @ptrFromInt(i32, 10); + _ = @as(i32, @ptrFromInt(10)); } pub export fn entry2() void { - _ = @ptrFromInt([]u8, 20); + _ = @as([]u8, @ptrFromInt(20)); } // error // backend=stage2 // target=native // -// :2:21: error: expected pointer type, found 'i32' -// :6:21: error: integer cannot be converted to slice type '[]u8' -// :6:21: note: slice length cannot be inferred from address +// :2:18: error: expected pointer type, found 'i32' +// :6:19: error: integer cannot be converted to slice type '[]u8' +// :6:19: note: slice length cannot be inferred from address diff --git a/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig b/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig index c45e998d825e..dfcbf6849c31 100644 --- a/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig +++ b/test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig @@ -1,5 +1,5 @@ pub export fn entry() void { - var 
y = @ptrFromInt([*]align(4) u8, 5); + var y: [*]align(4) u8 = @ptrFromInt(5); _ = y; } diff --git a/test/cases/compile_errors/ptrcast_to_non-pointer.zig b/test/cases/compile_errors/ptrcast_to_non-pointer.zig index 66a11a602be8..ec93dc12c2b5 100644 --- a/test/cases/compile_errors/ptrcast_to_non-pointer.zig +++ b/test/cases/compile_errors/ptrcast_to_non-pointer.zig @@ -1,9 +1,9 @@ export fn entry(a: *i32) usize { - return @ptrCast(usize, a); + return @ptrCast(a); } // error // backend=llvm // target=native // -// :2:21: error: expected pointer type, found 'usize' +// :2:12: error: expected pointer type, found 'usize' diff --git a/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig b/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig index d3d9b03ff56f..b06b54198485 100644 --- a/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig +++ b/test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig @@ -1,7 +1,7 @@ comptime { const array: [4]u8 = "aoeu".*; const sub_array = array[1..]; - const int_ptr = @ptrCast(*const u24, @alignCast(@alignOf(u24), sub_array)); + const int_ptr: *const u24 = @ptrCast(@alignCast(sub_array)); const deref = int_ptr.*; _ = deref; } diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig index 9b140a0923f6..b26ec702962a 100644 --- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig +++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig @@ -7,7 +7,7 @@ const Tag = @Type(.{ }, }); export fn entry() void { - _ = @enumFromInt(Tag, 0); + _ = @as(Tag, @enumFromInt(0)); } // error diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig index 
b2cd8e1214ee..5d5294ba301e 100644 --- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig +++ b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig @@ -7,7 +7,7 @@ const Tag = @Type(.{ }, }); export fn entry() void { - _ = @enumFromInt(Tag, 0); + _ = @as(Tag, @enumFromInt(0)); } // error diff --git a/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig b/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig index 5fab9c90a921..85fb0065d1e1 100644 --- a/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig +++ b/test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig @@ -1,6 +1,6 @@ export fn foo() void { const bytes align(@alignOf([]const u8)) = [1]u8{0xfa} ** 16; - var value = @ptrCast(*const []const u8, &bytes).*; + var value = @as(*const []const u8, @ptrCast(&bytes)).*; _ = value; } @@ -8,4 +8,4 @@ export fn foo() void { // backend=stage2 // target=native // -// :3:52: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not. +// :3:57: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not. 
diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig index df454a38d004..2b45fb6076da 100644 --- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig +++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig @@ -1,6 +1,6 @@ test "enum" { const E = enum(u8) { A, B, _ }; - _ = @tagName(@enumFromInt(E, 5)); + _ = @tagName(@as(E, @enumFromInt(5))); } // error @@ -8,5 +8,5 @@ test "enum" { // target=native // is_test=1 // -// :3:9: error: no field with value '@enumFromInt(tmp.test.enum.E, 5)' in enum 'test.enum.E' +// :3:9: error: no field with value '@enumFromInt(5)' in enum 'test.enum.E' // :2:15: note: declared here diff --git a/test/cases/compile_errors/truncate_sign_mismatch.zig b/test/cases/compile_errors/truncate_sign_mismatch.zig index a05660e28cb4..b34dfa8e0754 100644 --- a/test/cases/compile_errors/truncate_sign_mismatch.zig +++ b/test/cases/compile_errors/truncate_sign_mismatch.zig @@ -1,25 +1,25 @@ export fn entry1() i8 { var x: u32 = 10; - return @truncate(i8, x); + return @truncate(x); } export fn entry2() u8 { var x: i32 = -10; - return @truncate(u8, x); + return @truncate(x); } export fn entry3() i8 { comptime var x: u32 = 10; - return @truncate(i8, x); + return @truncate(x); } export fn entry4() u8 { comptime var x: i32 = -10; - return @truncate(u8, x); + return @truncate(x); } // error // backend=stage2 // target=native // -// :3:26: error: expected signed integer type, found 'u32' -// :7:26: error: expected unsigned integer type, found 'i32' -// :11:26: error: expected signed integer type, found 'u32' -// :15:26: error: expected unsigned integer type, found 'i32' +// :3:22: error: expected signed integer type, found 'u32' +// :7:22: error: expected unsigned integer type, found 'i32' +// :11:22: error: expected signed integer type, found 'u32' +// :15:22: error: expected unsigned 
integer type, found 'i32' diff --git a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig index a050eb6a4c1e..a7c8f0eb72eb 100644 --- a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig +++ b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig @@ -2,7 +2,7 @@ const Derp = opaque {}; extern fn bar(d: *Derp) void; export fn foo() void { var x = @as(u8, 1); - bar(@ptrCast(*anyopaque, &x)); + bar(@as(*anyopaque, @ptrCast(&x))); } // error diff --git a/test/cases/enum_values.0.zig b/test/cases/enum_values.0.zig index 2c44a095dde7..71c3e3521abe 100644 --- a/test/cases/enum_values.0.zig +++ b/test/cases/enum_values.0.zig @@ -7,7 +7,7 @@ pub fn main() void { number1; number2; } - const number3 = @enumFromInt(Number, 2); + const number3: Number = @enumFromInt(2); if (@intFromEnum(number3) != 2) { unreachable; } diff --git a/test/cases/enum_values.1.zig b/test/cases/enum_values.1.zig index 1b5a9836db8c..934106dd7959 100644 --- a/test/cases/enum_values.1.zig +++ b/test/cases/enum_values.1.zig @@ -3,7 +3,7 @@ const Number = enum { One, Two, Three }; pub fn main() void { var number1 = Number.One; var number2: Number = .Two; - const number3 = @enumFromInt(Number, 2); + const number3: Number = @enumFromInt(2); assert(number1 != number2); assert(number2 != number3); assert(@intFromEnum(number1) == 0); diff --git a/test/cases/error_in_nested_declaration.zig b/test/cases/error_in_nested_declaration.zig index 710b821e6552..20afacfb681c 100644 --- a/test/cases/error_in_nested_declaration.zig +++ b/test/cases/error_in_nested_declaration.zig @@ -3,7 +3,7 @@ const S = struct { c: i32, a: struct { pub fn str(_: @This(), extra: []u32) []i32 { - return @bitCast([]i32, extra); + return @bitCast(extra); } }, }; @@ -27,5 +27,5 @@ pub export fn entry2() void { // target=native // // :17:12: error: C pointers cannot point to opaque types -// 
:6:29: error: cannot @bitCast to '[]i32' -// :6:29: note: use @ptrCast to cast from '[]u32' +// :6:20: error: cannot @bitCast to '[]i32' +// :6:20: note: use @ptrCast to cast from '[]u32' diff --git a/test/cases/int_to_ptr.0.zig b/test/cases/int_to_ptr.0.zig index ba14c0380430..09efb8b1a5c8 100644 --- a/test/cases/int_to_ptr.0.zig +++ b/test/cases/int_to_ptr.0.zig @@ -1,8 +1,8 @@ pub fn main() void { - _ = @ptrFromInt(*u8, 0); + _ = @as(*u8, @ptrFromInt(0)); } // error // output_mode=Exe // -// :2:24: error: pointer type '*u8' does not allow address zero +// :2:18: error: pointer type '*u8' does not allow address zero diff --git a/test/cases/int_to_ptr.1.zig b/test/cases/int_to_ptr.1.zig index e75ae81f6f47..d5aed471e173 100644 --- a/test/cases/int_to_ptr.1.zig +++ b/test/cases/int_to_ptr.1.zig @@ -1,7 +1,7 @@ pub fn main() void { - _ = @ptrFromInt(*u32, 2); + _ = @as(*u32, @ptrFromInt(2)); } // error // -// :2:25: error: pointer type '*u32' requires aligned address +// :2:19: error: pointer type '*u32' requires aligned address diff --git a/test/cases/llvm/f_segment_address_space_reading_and_writing.zig b/test/cases/llvm/f_segment_address_space_reading_and_writing.zig index 507362a93787..ddcd41bf162a 100644 --- a/test/cases/llvm/f_segment_address_space_reading_and_writing.zig +++ b/test/cases/llvm/f_segment_address_space_reading_and_writing.zig @@ -34,7 +34,7 @@ pub fn main() void { setFs(@intFromPtr(&test_value)); assert(getFs() == @intFromPtr(&test_value)); - var test_ptr = @ptrFromInt(*allowzero addrspace(.fs) u64, 0); + var test_ptr: *allowzero addrspace(.fs) u64 = @ptrFromInt(0); assert(test_ptr.* == 12345); test_ptr.* = 98765; assert(test_value == 98765); diff --git a/test/cases/llvm/large_slices.zig b/test/cases/llvm/large_slices.zig index f90e588ab0a1..8e9431df8c76 100644 --- a/test/cases/llvm/large_slices.zig +++ b/test/cases/llvm/large_slices.zig @@ -1,5 +1,5 @@ pub fn main() void { - const large_slice = @ptrFromInt([*]const u8, 1)[0..(0xffffffffffffffff 
>> 3)]; + const large_slice = @as([*]const u8, @ptrFromInt(1))[0..(0xffffffffffffffff >> 3)]; _ = large_slice; } diff --git a/test/cases/safety/@alignCast misaligned.zig b/test/cases/safety/@alignCast misaligned.zig index 538e0ecdf69a..ade27c2747a8 100644 --- a/test/cases/safety/@alignCast misaligned.zig +++ b/test/cases/safety/@alignCast misaligned.zig @@ -16,7 +16,8 @@ pub fn main() !void { } fn foo(bytes: []u8) u32 { const slice4 = bytes[1..5]; - const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4)); + const aligned: *align(4) [4]u8 = @alignCast(slice4); + const int_slice = std.mem.bytesAsSlice(u32, aligned); return int_slice[0]; } // run diff --git a/test/cases/safety/@enumFromInt - no matching tag value.zig b/test/cases/safety/@enumFromInt - no matching tag value.zig index 57f8954f930c..5051869cc0cb 100644 --- a/test/cases/safety/@enumFromInt - no matching tag value.zig +++ b/test/cases/safety/@enumFromInt - no matching tag value.zig @@ -17,7 +17,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: u2) Foo { - return @enumFromInt(Foo, a); + return @enumFromInt(a); } fn baz(_: Foo) void {} diff --git a/test/cases/safety/@errSetCast error not present in destination.zig b/test/cases/safety/@errSetCast error not present in destination.zig index 372bd80aa59a..84aeb7610e9c 100644 --- a/test/cases/safety/@errSetCast error not present in destination.zig +++ b/test/cases/safety/@errSetCast error not present in destination.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn foo(set1: Set1) Set2 { - return @errSetCast(Set2, set1); + return @errSetCast(set1); } // run // backend=llvm diff --git a/test/cases/safety/@intCast to u0.zig b/test/cases/safety/@intCast to u0.zig index f3f969548bdf..8c9f76e2aa00 100644 --- a/test/cases/safety/@intCast to u0.zig +++ b/test/cases/safety/@intCast to u0.zig @@ -14,7 +14,7 @@ pub fn main() !void { } fn bar(one: u1, not_zero: i32) void { - var x = one << @intCast(u0, not_zero); + var x = one 
<< @as(u0, @intCast(not_zero)); _ = x; } // run diff --git a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig index 9a8853c0e9d6..a5a8d831b337 100644 --- a/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig +++ b/test/cases/safety/@intFromFloat cannot fit - negative out of range.zig @@ -12,7 +12,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: f32) i8 { - return @intFromFloat(i8, a); + return @intFromFloat(a); } fn baz(_: i8) void {} // run diff --git a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig index caf7bbf0d6ea..1bf1a667659f 100644 --- a/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig +++ b/test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig @@ -12,7 +12,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: f32) u8 { - return @intFromFloat(u8, a); + return @intFromFloat(a); } fn baz(_: u8) void {} // run diff --git a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig index d335238b65f2..15a9fa7ad188 100644 --- a/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig +++ b/test/cases/safety/@intFromFloat cannot fit - positive out of range.zig @@ -12,7 +12,7 @@ pub fn main() !void { return error.TestFailed; } fn bar(a: f32) u8 { - return @intFromFloat(u8, a); + return @intFromFloat(a); } fn baz(_: u8) void {} // run diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig index 345d5cfc7407..41cff07e329c 100644 --- a/test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig +++ b/test/cases/safety/@ptrFromInt address zero to 
non-optional byte-aligned pointer.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var zero: usize = 0; - var b = @ptrFromInt(*u8, zero); + var b: *u8 = @ptrFromInt(zero); _ = b; return error.TestFailed; } diff --git a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig index e7d3b66d6c0f..92e98d4777d3 100644 --- a/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig +++ b/test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var zero: usize = 0; - var b = @ptrFromInt(*i32, zero); + var b: *i32 = @ptrFromInt(zero); _ = b; return error.TestFailed; } diff --git a/test/cases/safety/@ptrFromInt with misaligned address.zig b/test/cases/safety/@ptrFromInt with misaligned address.zig index c2e1d351eb59..afb8aa7eb853 100644 --- a/test/cases/safety/@ptrFromInt with misaligned address.zig +++ b/test/cases/safety/@ptrFromInt with misaligned address.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var x: usize = 5; - var y = @ptrFromInt([*]align(4) u8, x); + var y: [*]align(4) u8 = @ptrFromInt(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/@tagName on corrupted enum value.zig b/test/cases/safety/@tagName on corrupted enum value.zig index 43af9fbda62c..a541771df157 100644 --- a/test/cases/safety/@tagName on corrupted enum value.zig +++ b/test/cases/safety/@tagName on corrupted enum value.zig @@ -15,7 +15,7 @@ const E = enum(u32) { pub fn main() !void { var e: E = undefined; - @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55); + @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55); var n = @tagName(e); _ = n; return error.TestFailed; diff --git 
a/test/cases/safety/@tagName on corrupted union value.zig b/test/cases/safety/@tagName on corrupted union value.zig index a72755abdc3b..dd3d9bd3bffd 100644 --- a/test/cases/safety/@tagName on corrupted union value.zig +++ b/test/cases/safety/@tagName on corrupted union value.zig @@ -15,7 +15,7 @@ const U = union(enum(u32)) { pub fn main() !void { var u: U = undefined; - @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55); + @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55); var t: @typeInfo(U).Union.tag_type.? = u; var n = @tagName(t); _ = n; diff --git a/test/cases/safety/pointer casting to null function pointer.zig b/test/cases/safety/pointer casting to null function pointer.zig index 7736cb503479..8f399b66dc27 100644 --- a/test/cases/safety/pointer casting to null function pointer.zig +++ b/test/cases/safety/pointer casting to null function pointer.zig @@ -13,7 +13,7 @@ fn getNullPtr() ?*const anyopaque { } pub fn main() !void { const null_ptr: ?*const anyopaque = getNullPtr(); - const required_ptr: *align(1) const fn () void = @ptrCast(*align(1) const fn () void, null_ptr); + const required_ptr: *align(1) const fn () void = @ptrCast(null_ptr); _ = required_ptr; return error.TestFailed; } diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig index a79298b7da08..9052913691de 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var value: c_short = -1; - var casted = @intCast(u32, value); + var casted: u32 = @intCast(value); _ = casted; return error.TestFailed; } diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig 
b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig index 6c4e9e256d1f..5d8c3f88c8a3 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig @@ -13,7 +13,7 @@ pub fn main() !void { return error.TestFailed; } fn unsigned_cast(x: i32) u32 { - return @intCast(u32, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cases/safety/signed-unsigned vector cast.zig b/test/cases/safety/signed-unsigned vector cast.zig index d287c0a1aee8..60406aa8a37e 100644 --- a/test/cases/safety/signed-unsigned vector cast.zig +++ b/test/cases/safety/signed-unsigned vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() !void { var x = @splat(4, @as(i32, -2147483647)); - var y = @intCast(@Vector(4, u32), x); + var y: @Vector(4, u32) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/slice sentinel mismatch - optional pointers.zig b/test/cases/safety/slice sentinel mismatch - optional pointers.zig index 33f4a1099b03..a3b4a98575e6 100644 --- a/test/cases/safety/slice sentinel mismatch - optional pointers.zig +++ b/test/cases/safety/slice sentinel mismatch - optional pointers.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { - var buf: [4]?*i32 = .{ @ptrFromInt(*i32, 4), @ptrFromInt(*i32, 8), @ptrFromInt(*i32, 12), @ptrFromInt(*i32, 16) }; + var buf: [4]?*i32 = .{ @ptrFromInt(4), @ptrFromInt(8), @ptrFromInt(12), @ptrFromInt(16) }; const slice = buf[0..3 :null]; _ = slice; return error.TestFailed; diff --git a/test/cases/safety/switch else on corrupt enum value - one prong.zig b/test/cases/safety/switch else on corrupt enum value - one prong.zig index 2c0b58fcd4ee..c11227c3bee8 100644 --- a/test/cases/safety/switch else on corrupt enum value - one prong.zig +++ 
b/test/cases/safety/switch else on corrupt enum value - one prong.zig @@ -13,7 +13,7 @@ const E = enum(u32) { }; pub fn main() !void { var a: E = undefined; - @ptrCast(*u32, &a).* = 255; + @as(*u32, @ptrCast(&a)).* = 255; switch (a) { .one => @panic("one"), else => @panic("else"), diff --git a/test/cases/safety/switch else on corrupt enum value - union.zig b/test/cases/safety/switch else on corrupt enum value - union.zig index 358ecc89aca5..a63c78597e66 100644 --- a/test/cases/safety/switch else on corrupt enum value - union.zig +++ b/test/cases/safety/switch else on corrupt enum value - union.zig @@ -18,7 +18,7 @@ const U = union(E) { }; pub fn main() !void { var a: U = undefined; - @ptrCast(*align(@alignOf(U)) u32, &a).* = 0xFFFF_FFFF; + @as(*align(@alignOf(U)) u32, @ptrCast(&a)).* = 0xFFFF_FFFF; switch (a) { .one => @panic("one"), else => @panic("else"), diff --git a/test/cases/safety/switch else on corrupt enum value.zig b/test/cases/safety/switch else on corrupt enum value.zig index af04b7f4c3b1..7e050838c086 100644 --- a/test/cases/safety/switch else on corrupt enum value.zig +++ b/test/cases/safety/switch else on corrupt enum value.zig @@ -13,7 +13,7 @@ const E = enum(u32) { }; pub fn main() !void { var a: E = undefined; - @ptrCast(*u32, &a).* = 255; + @as(*u32, @ptrCast(&a)).* = 255; switch (a) { else => @panic("else"), } diff --git a/test/cases/safety/switch on corrupted enum value.zig b/test/cases/safety/switch on corrupted enum value.zig index 687be0b598b9..f89076191172 100644 --- a/test/cases/safety/switch on corrupted enum value.zig +++ b/test/cases/safety/switch on corrupted enum value.zig @@ -15,7 +15,7 @@ const E = enum(u32) { pub fn main() !void { var e: E = undefined; - @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55); + @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55); switch (e) { .X, .Y => @breakpoint(), } diff --git a/test/cases/safety/switch on corrupted union value.zig b/test/cases/safety/switch on corrupted union value.zig index 
745a3fd037ee..fc93c9d6e7f0 100644 --- a/test/cases/safety/switch on corrupted union value.zig +++ b/test/cases/safety/switch on corrupted union value.zig @@ -15,7 +15,7 @@ const U = union(enum(u32)) { pub fn main() !void { var u: U = undefined; - @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55); + @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55); switch (u) { .X, .Y => @breakpoint(), } diff --git a/test/cases/safety/truncating vector cast.zig b/test/cases/safety/truncating vector cast.zig index e81a6e64ef52..501bf694acc9 100644 --- a/test/cases/safety/truncating vector cast.zig +++ b/test/cases/safety/truncating vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() !void { var x = @splat(4, @as(u32, 0xdeadbeef)); - var y = @intCast(@Vector(4, u16), x); + var y: @Vector(4, u16) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig index f370f76557f4..bd35f354226c 100644 --- a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig +++ b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig @@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi } pub fn main() !void { var value: u8 = 245; - var casted = @intCast(i8, value); + var casted: i8 = @intCast(value); _ = casted; return error.TestFailed; } diff --git a/test/cases/safety/unsigned-signed vector cast.zig b/test/cases/safety/unsigned-signed vector cast.zig index d4b80fb05c2a..cf827878b687 100644 --- a/test/cases/safety/unsigned-signed vector cast.zig +++ b/test/cases/safety/unsigned-signed vector cast.zig @@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi pub fn main() 
!void { var x = @splat(4, @as(u32, 0x80000000)); - var y = @intCast(@Vector(4, i32), x); + var y: @Vector(4, i32) = @intCast(x); _ = y; return error.TestFailed; } diff --git a/test/cases/safety/value does not fit in shortening cast - u0.zig b/test/cases/safety/value does not fit in shortening cast - u0.zig index 9b303e5cf5c7..ec111a2caedd 100644 --- a/test/cases/safety/value does not fit in shortening cast - u0.zig +++ b/test/cases/safety/value does not fit in shortening cast - u0.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn shorten_cast(x: u8) u0 { - return @intCast(u0, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cases/safety/value does not fit in shortening cast.zig b/test/cases/safety/value does not fit in shortening cast.zig index 0e98a0978755..a5ea41659e86 100644 --- a/test/cases/safety/value does not fit in shortening cast.zig +++ b/test/cases/safety/value does not fit in shortening cast.zig @@ -14,7 +14,7 @@ pub fn main() !void { return error.TestFailed; } fn shorten_cast(x: i32) i8 { - return @intCast(i8, x); + return @intCast(x); } // run // backend=llvm diff --git a/test/cbe.zig b/test/cbe.zig index f0cf720fd338..b56202c7e50b 100644 --- a/test/cbe.zig +++ b/test/cbe.zig @@ -642,7 +642,7 @@ pub fn addCases(ctx: *Cases) !void { \\pub export fn main() c_int { \\ var number1 = Number.One; \\ var number2: Number = .Two; - \\ const number3 = @enumFromInt(Number, 2); + \\ const number3: Number = @enumFromInt(2); \\ if (number1 == number2) return 1; \\ if (number2 == number3) return 1; \\ if (@intFromEnum(number1) != 0) return 1; @@ -737,19 +737,19 @@ pub fn addCases(ctx: *Cases) !void { case.addError( \\pub export fn main() c_int { \\ const a = 1; - \\ _ = @enumFromInt(bool, a); + \\ _ = @as(bool, @enumFromInt(a)); \\} , &.{ - ":3:20: error: expected enum, found 'bool'", + ":3:19: error: expected enum, found 'bool'", }); case.addError( \\const E = enum { a, b, c }; \\pub export fn main() c_int { - \\ _ = 
@enumFromInt(E, 3); + \\ _ = @as(E, @enumFromInt(3)); \\} , &.{ - ":3:9: error: enum 'tmp.E' has no tag with value '3'", + ":3:16: error: enum 'tmp.E' has no tag with value '3'", ":1:11: note: enum declared here", }); diff --git a/test/compare_output.zig b/test/compare_output.zig index 66b56244436c..92dfd76b58b7 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -180,8 +180,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\const c = @cImport(@cInclude("stdlib.h")); \\ \\export fn compare_fn(a: ?*const anyopaque, b: ?*const anyopaque) c_int { - \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a)); - \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b)); + \\ const a_int: *const i32 = @ptrCast(@alignCast(a)); + \\ const b_int: *const i32 = @ptrCast(@alignCast(b)); \\ if (a_int.* < b_int.*) { \\ return -1; \\ } else if (a_int.* > b_int.*) { @@ -194,7 +194,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\pub export fn main() c_int { \\ var array = [_]u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ - \\ c.qsort(@ptrCast(?*anyopaque, &array), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn); + \\ c.qsort(@ptrCast(&array), @intCast(array.len), @sizeOf(i32), compare_fn); \\ \\ for (array, 0..) 
|item, i| { \\ if (item != i) { @@ -229,8 +229,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ } \\ const small: f32 = 3.25; \\ const x: f64 = small; - \\ const y = @intFromFloat(i32, x); - \\ const z = @floatFromInt(f64, y); + \\ const y: i32 = @intFromFloat(x); + \\ const z: f64 = @floatFromInt(y); \\ _ = c.printf("%.2f\n%d\n%.2f\n%.2f\n", x, y, z, @as(f64, -0.4)); \\ return 0; \\} diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig index ec073e183a7e..c226e031963a 100644 --- a/test/link/macho/dead_strip_dylibs/build.zig +++ b/test/link/macho/dead_strip_dylibs/build.zig @@ -37,7 +37,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize exe.dead_strip_dylibs = true; const run_cmd = b.addRunArtifact(exe); - run_cmd.expectExitCode(@bitCast(u8, @as(i8, -2))); // should fail + run_cmd.expectExitCode(@as(u8, @bitCast(@as(i8, -2)))); // should fail test_step.dependOn(&run_cmd.step); } } diff --git a/test/nvptx.zig b/test/nvptx.zig index 0bdc9455f76d..c3748570e89c 100644 --- a/test/nvptx.zig +++ b/test/nvptx.zig @@ -60,7 +60,7 @@ pub fn addCases(ctx: *Cases) !void { \\ \\ var _sdata: [1024]f32 addrspace(.shared) = undefined; \\ pub export fn reduceSum(d_x: []const f32, out: *f32) callconv(.Kernel) void { - \\ var sdata = @addrSpaceCast(.generic, &_sdata); + \\ var sdata: *addrspace(.generic) [1024]f32 = @addrSpaceCast(&_sdata); \\ const tid: u32 = threadIdX(); \\ var sum = d_x[tid]; \\ sdata[tid] = sum; diff --git a/test/standalone/hello_world/hello_libc.zig b/test/standalone/hello_world/hello_libc.zig index 42ba4db4b194..992afd736e76 100644 --- a/test/standalone/hello_world/hello_libc.zig +++ b/test/standalone/hello_world/hello_libc.zig @@ -10,6 +10,6 @@ const msg = "Hello, world!\n"; pub export fn main(argc: c_int, argv: **u8) c_int { _ = argv; _ = argc; - if (c.printf(msg) != @intCast(c_int, c.strlen(msg))) return -1; + if (c.printf(msg) != @as(c_int, 
@intCast(c.strlen(msg)))) return -1; return 0; } diff --git a/test/standalone/issue_11595/main.zig b/test/standalone/issue_11595/main.zig index b91f54cb9c53..12aa6ac3cd73 100644 --- a/test/standalone/issue_11595/main.zig +++ b/test/standalone/issue_11595/main.zig @@ -1,5 +1,5 @@ extern fn check() c_int; pub fn main() u8 { - return @intCast(u8, check()); + return @as(u8, @intCast(check())); } diff --git a/test/standalone/main_return_error/error_u8_non_zero.zig b/test/standalone/main_return_error/error_u8_non_zero.zig index 9f7de780aca1..c45458fb2168 100644 --- a/test/standalone/main_return_error/error_u8_non_zero.zig +++ b/test/standalone/main_return_error/error_u8_non_zero.zig @@ -1,7 +1,7 @@ const Err = error{Foo}; fn foo() u8 { - var x = @intCast(u8, 9); + var x = @as(u8, @intCast(9)); return x; } diff --git a/test/standalone/mix_c_files/main.zig b/test/standalone/mix_c_files/main.zig index 913d284fe905..d755ada04c71 100644 --- a/test/standalone/mix_c_files/main.zig +++ b/test/standalone/mix_c_files/main.zig @@ -25,6 +25,6 @@ pub fn main() anyerror!void { x = add_C(x); x = add_C_zig(x); - const u = @intCast(u32, x); + const u = @as(u32, @intCast(x)); try std.testing.expect(u / 100 == u % 100); } diff --git a/test/standalone/pie/main.zig b/test/standalone/pie/main.zig index 89d204aa1c2a..edf6a3fcaae8 100644 --- a/test/standalone/pie/main.zig +++ b/test/standalone/pie/main.zig @@ -5,7 +5,7 @@ threadlocal var foo: u8 = 42; test "Check ELF header" { // PIE executables are marked as ET_DYN, regular exes as ET_EXEC. 
- const header = @ptrFromInt(*elf.Ehdr, std.process.getBaseAddress()); + const header = @as(*elf.Ehdr, @ptrFromInt(std.process.getBaseAddress())); try std.testing.expectEqual(elf.ET.DYN, header.e_type); } diff --git a/test/translate_c.zig b/test/translate_c.zig index 966f3e278540..40edec57f73f 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -351,7 +351,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn main() void { - \\ var a: c_int = @bitCast(c_int, @truncate(c_uint, @alignOf(c_int))); + \\ var a: c_int = @as(c_int, @bitCast(@as(c_uint, @truncate(@alignOf(c_int))))); \\ _ = @TypeOf(a); \\} }); @@ -465,7 +465,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) { \\ const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8); \\ const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int); - \\ return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4)); + \\ return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4))); \\ } \\}; \\pub const struct_bar = extern struct { @@ -473,7 +473,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) { \\ const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8); \\ const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int); - \\ return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4)); + \\ return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4))); \\ } \\}; }); @@ -635,7 +635,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\}; \\pub export fn foo(arg_x: [*c]outer) void { \\ var x = arg_x; - \\ 
x.*.unnamed_0.unnamed_0.y = @bitCast(c_int, @as(c_uint, x.*.unnamed_0.x)); + \\ x.*.unnamed_0.unnamed_0.y = @as(c_int, @bitCast(@as(c_uint, x.*.unnamed_0.x))); \\} }); @@ -721,7 +721,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub const struct_opaque_2 = opaque {}; \\pub export fn function(arg_opaque_1: ?*struct_opaque) void { \\ var opaque_1 = arg_opaque_1; - \\ var cast: ?*struct_opaque_2 = @ptrCast(?*struct_opaque_2, opaque_1); + \\ var cast: ?*struct_opaque_2 = @as(?*struct_opaque_2, @ptrCast(opaque_1)); \\ _ = @TypeOf(cast); \\} }); @@ -799,7 +799,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ _ = @TypeOf(b); \\ const c: c_int = undefined; \\ _ = @TypeOf(c); - \\ const d: c_uint = @bitCast(c_uint, @as(c_int, 440)); + \\ const d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440))); \\ _ = @TypeOf(d); \\ var e: c_int = 10; \\ _ = @TypeOf(e); @@ -904,8 +904,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub extern fn foo() void; \\pub export fn bar() void { - \\ var func_ptr: ?*anyopaque = @ptrCast(?*anyopaque, &foo); - \\ var typed_func_ptr: ?*const fn () callconv(.C) void = @ptrFromInt(?*const fn () callconv(.C) void, @intCast(c_ulong, @intFromPtr(func_ptr))); + \\ var func_ptr: ?*anyopaque = @as(?*anyopaque, @ptrCast(&foo)); + \\ var typed_func_ptr: ?*const fn () callconv(.C) void = @as(?*const fn () callconv(.C) void, @ptrFromInt(@as(c_ulong, @intCast(@intFromPtr(func_ptr))))); \\ _ = @TypeOf(typed_func_ptr); \\} }); @@ -1353,7 +1353,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn foo() ?*anyopaque { \\ var x: [*c]c_ushort = undefined; - \\ return @ptrCast(?*anyopaque, x); + \\ return @as(?*anyopaque, @ptrCast(x)); \\} }); @@ -1543,7 +1543,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn ptrcast() [*c]f32 { \\ var a: [*c]c_int = undefined; - \\ return @ptrCast([*c]f32, 
@alignCast(@import("std").meta.alignment([*c]f32), a)); + \\ return @as([*c]f32, @ptrCast(@alignCast(a))); \\} }); @@ -1555,7 +1555,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn ptrptrcast() [*c][*c]f32 { \\ var a: [*c][*c]c_int = undefined; - \\ return @ptrCast([*c][*c]f32, @alignCast(@import("std").meta.alignment([*c][*c]f32), a)); + \\ return @as([*c][*c]f32, @ptrCast(@alignCast(a))); \\} }); @@ -1579,23 +1579,23 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn test_ptr_cast() void { \\ var p: ?*anyopaque = undefined; \\ { - \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p)); + \\ var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_char); - \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p)); + \\ var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_short); - \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p)); + \\ var to_int: [*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_int); - \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p)); + \\ var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_longlong); \\ } \\ { - \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p)); + \\ var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_char); - \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p)); + \\ var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_short); - \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p)); + \\ var to_int: 
[*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_int); - \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p)); + \\ var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p))); \\ _ = @TypeOf(to_longlong); \\ } \\} @@ -1651,7 +1651,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn foo() c_int { - \\ return (@as(c_int, 1) << @intCast(@import("std").math.Log2Int(c_int), 2)) >> @intCast(@import("std").math.Log2Int(c_int), 1); + \\ return (@as(c_int, 1) << @intCast(2)) >> @intCast(1); \\} }); @@ -1885,7 +1885,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\const enum_unnamed_1 = ++ " " ++ default_enum_type ++ \\; - \\pub export var h: enum_unnamed_1 = @bitCast(c_uint, e); + \\pub export var h: enum_unnamed_1 = @as(c_uint, @bitCast(e)); \\pub const i: c_int = 0; \\pub const j: c_int = 1; \\pub const k: c_int = 2; @@ -2091,12 +2091,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ _ = @TypeOf(c_1); \\ var a_2: c_int = undefined; \\ var b_3: u8 = 123; - \\ b_3 = @bitCast(u8, @truncate(i8, a_2)); + \\ b_3 = @as(u8, @bitCast(@as(i8, @truncate(a_2)))); \\ { \\ var d: c_int = 5; \\ _ = @TypeOf(d); \\ } - \\ var d: c_uint = @bitCast(c_uint, @as(c_int, 440)); + \\ var d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440))); \\ _ = @TypeOf(d); \\} }); @@ -2236,9 +2236,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\int c = 3.1415; \\double d = 3; , &[_][]const u8{ - \\pub export var a: f32 = @floatCast(f32, 3.1415); + \\pub export var a: f32 = @as(f32, @floatCast(3.1415)); \\pub export var b: f64 = 3.1415; - \\pub export var c: c_int = @intFromFloat(c_int, 3.1415); + \\pub export var c: c_int = @as(c_int, @intFromFloat(3.1415)); \\pub export var d: f64 = 3; }); @@ -2423,7 +2423,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , &[_][]const u8{ \\pub export fn 
int_from_float(arg_a: f32) c_int { \\ var a = arg_a; - \\ return @intFromFloat(c_int, a); + \\ return @as(c_int, @intFromFloat(a)); \\} }); @@ -2533,15 +2533,15 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ var a = arg_a; \\ var b = arg_b; \\ var c = arg_c; - \\ var d: enum_Foo = @bitCast(c_uint, FooA); + \\ var d: enum_Foo = @as(c_uint, @bitCast(FooA)); \\ var e: c_int = @intFromBool((a != 0) and (b != 0)); \\ var f: c_int = @intFromBool((b != 0) and (c != null)); \\ var g: c_int = @intFromBool((a != 0) and (c != null)); \\ var h: c_int = @intFromBool((a != 0) or (b != 0)); \\ var i: c_int = @intFromBool((b != 0) or (c != null)); \\ var j: c_int = @intFromBool((a != 0) or (c != null)); - \\ var k: c_int = @intFromBool((a != 0) or (@bitCast(c_int, d) != 0)); - \\ var l: c_int = @intFromBool((@bitCast(c_int, d) != 0) and (b != 0)); + \\ var k: c_int = @intFromBool((a != 0) or (@as(c_int, @bitCast(d)) != 0)); + \\ var l: c_int = @intFromBool((@as(c_int, @bitCast(d)) != 0) and (b != 0)); \\ var m: c_int = @intFromBool((c != null) or (d != 0)); \\ var td: SomeTypedef = 44; \\ var o: c_int = @intFromBool((td != 0) or (b != 0)); @@ -2707,10 +2707,10 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export var array: [100]c_int = [1]c_int{0} ** 100; \\pub export fn foo(arg_index: c_int) c_int { \\ var index = arg_index; - \\ return array[@intCast(c_uint, index)]; + \\ return array[@as(c_uint, @intCast(index))]; \\} , - \\pub const ACCESS = array[@intCast(usize, @as(c_int, 2))]; + \\pub const ACCESS = array[@as(usize, @intCast(@as(c_int, 2)))]; }); cases.add("cast signed array index to unsigned", @@ -2722,7 +2722,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: [10]c_int = undefined; \\ var i: c_int = 0; - \\ a[@intCast(c_uint, i)] = 0; + \\ a[@as(c_uint, @intCast(i))] = 0; \\} }); @@ -2735,7 +2735,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ 
var a: [10]c_longlong = undefined; \\ var i: c_longlong = 0; - \\ a[@intCast(usize, i)] = 0; + \\ a[@as(usize, @intCast(i))] = 0; \\} }); @@ -3006,8 +3006,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn log2(arg_a: c_uint) c_int { \\ var a = arg_a; \\ var i: c_int = 0; - \\ while (a > @bitCast(c_uint, @as(c_int, 0))) { - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ while (a > @as(c_uint, @bitCast(@as(c_int, 0)))) { + \\ a >>= @intCast(@as(c_int, 1)); \\ } \\ return i; \\} @@ -3026,8 +3026,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn log2(arg_a: u32) c_int { \\ var a = arg_a; \\ var i: c_int = 0; - \\ while (a > @bitCast(u32, @as(c_int, 0))) { - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ while (a > @as(u32, @bitCast(@as(c_int, 0)))) { + \\ a >>= @intCast(@as(c_int, 1)); \\ } \\ return i; \\} @@ -3084,14 +3084,14 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ ref.* ^= @as(c_int, 1); \\ break :blk ref.*; \\ }; - \\ a >>= @intCast(@import("std").math.Log2Int(c_int), blk: { + \\ a >>= @intCast(blk: { \\ const ref = &a; - \\ ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* >>= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); - \\ a <<= @intCast(@import("std").math.Log2Int(c_int), blk: { + \\ a <<= @intCast(blk: { \\ const ref = &a; - \\ ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* <<= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); \\ a = @divTrunc(a, blk: { @@ -3106,12 +3106,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ }); \\ b /= blk: { \\ const ref = &b; - \\ ref.* /= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* /= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ b %= blk: { \\ const ref = &b; - \\ ref.* %= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* %= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; 
\\} @@ -3134,42 +3134,42 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ var a: c_uint = 0; \\ a +%= blk: { \\ const ref = &a; - \\ ref.* +%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* +%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a -%= blk: { \\ const ref = &a; - \\ ref.* -%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* -%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a *%= blk: { \\ const ref = &a; - \\ ref.* *%= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* *%= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a &= blk: { \\ const ref = &a; - \\ ref.* &= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* &= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a |= blk: { \\ const ref = &a; - \\ ref.* |= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* |= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; \\ a ^= blk: { \\ const ref = &a; - \\ ref.* ^= @bitCast(c_uint, @as(c_int, 1)); + \\ ref.* ^= @as(c_uint, @bitCast(@as(c_int, 1))); \\ break :blk ref.*; \\ }; - \\ a >>= @intCast(@import("std").math.Log2Int(c_uint), blk: { + \\ a >>= @intCast(blk: { \\ const ref = &a; - \\ ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* >>= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); - \\ a <<= @intCast(@import("std").math.Log2Int(c_uint), blk: { + \\ a <<= @intCast(blk: { \\ const ref = &a; - \\ ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1)); + \\ ref.* <<= @intCast(@as(c_int, 1)); \\ break :blk ref.*; \\ }); \\} @@ -3258,21 +3258,21 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub extern fn fn_bool(x: bool) void; \\pub extern fn fn_ptr(x: ?*anyopaque) void; \\pub export fn call() void { - \\ fn_int(@intFromFloat(c_int, 3.0)); - \\ fn_int(@intFromFloat(c_int, 3.0)); + \\ fn_int(@as(c_int, @intFromFloat(3.0))); + \\ fn_int(@as(c_int, @intFromFloat(3.0))); \\ fn_int(@as(c_int, 1094861636)); - \\ 
fn_f32(@floatFromInt(f32, @as(c_int, 3))); - \\ fn_f64(@floatFromInt(f64, @as(c_int, 3))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '3')))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '\x01')))); - \\ fn_char(@bitCast(u8, @truncate(i8, @as(c_int, 0)))); + \\ fn_f32(@as(f32, @floatFromInt(@as(c_int, 3)))); + \\ fn_f64(@as(f64, @floatFromInt(@as(c_int, 3)))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '3')))))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '\x01')))))); + \\ fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, 0)))))); \\ fn_f32(3.0); \\ fn_f64(3.0); \\ fn_bool(@as(c_int, 123) != 0); \\ fn_bool(@as(c_int, 0) != 0); \\ fn_bool(@intFromPtr(&fn_int) != 0); - \\ fn_int(@intCast(c_int, @intFromPtr(&fn_int))); - \\ fn_ptr(@ptrFromInt(?*anyopaque, @as(c_int, 42))); + \\ fn_int(@as(c_int, @intCast(@intFromPtr(&fn_int)))); + \\ fn_ptr(@as(?*anyopaque, @ptrFromInt(@as(c_int, 42)))); \\} }); @@ -3411,11 +3411,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , &[_][]const u8{ \\pub export fn foo() c_ulong { - \\ return @bitCast(c_ulong, @as(c_long, -@as(c_int, 1))); + \\ return @as(c_ulong, @bitCast(@as(c_long, -@as(c_int, 1)))); \\} \\pub export fn bar(arg_x: c_long) c_ushort { \\ var x = arg_x; - \\ return @bitCast(c_ushort, @truncate(c_short, x)); + \\ return @as(c_ushort, @bitCast(@as(c_short, @truncate(x)))); \\} }); @@ -3473,11 +3473,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} \\pub export fn bar(arg_a: [*c]const c_int) void { \\ var a = arg_a; - \\ foo(@ptrFromInt([*c]c_int, @intFromPtr(a))); + \\ foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a)))); \\} \\pub export fn baz(arg_a: [*c]volatile c_int) void { \\ var a = arg_a; - \\ foo(@ptrFromInt([*c]c_int, @intFromPtr(a))); + \\ foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a)))); \\} }); @@ -3860,9 +3860,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ p[1]; \\} , &[_][]const u8{ - \\_ = p[@intCast(c_uint, 
@as(c_int, 0))]; + \\_ = p[@as(c_uint, @intCast(@as(c_int, 0)))]; , - \\_ = p[@intCast(c_uint, @as(c_int, 1))]; + \\_ = p[@as(c_uint, @intCast(@as(c_int, 1)))]; }); cases.add("Undefined macro identifier", @@ -3928,7 +3928,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: S = undefined; \\ var b: S = undefined; - \\ var c: c_longlong = @divExact(@bitCast(c_longlong, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8)); + \\ var c: c_longlong = @divExact(@as(c_longlong, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8)); \\ _ = @TypeOf(c); \\} }); @@ -3943,7 +3943,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn foo() void { \\ var a: S = undefined; \\ var b: S = undefined; - \\ var c: c_long = @divExact(@bitCast(c_long, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8)); + \\ var c: c_long = @divExact(@as(c_long, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8)); \\ _ = @TypeOf(c); \\} }); diff --git a/tools/extract-grammar.zig b/tools/extract-grammar.zig index 9a72bf46e4a4..b251f577415b 100644 --- a/tools/extract-grammar.zig +++ b/tools/extract-grammar.zig @@ -90,7 +90,7 @@ fn read(path: []const u8, allocator: mem.Allocator) ![:0]const u8 { const st = try f.stat(); if (st.size > max_src_size) return error.FileTooBig; - const src = try allocator.allocSentinel(u8, @intCast(usize, st.size), 0); + const src = try allocator.allocSentinel(u8, @as(usize, @intCast(st.size)), 0); const n = try f.readAll(src); if (n != st.size) return error.UnexpectedEndOfFile; diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig index 91d0ba80ac0c..9427451a28d8 100644 --- a/tools/gen_spirv_spec.zig +++ b/tools/gen_spirv_spec.zig @@ -40,7 +40,7 @@ fn extendedStructs( kinds: []const g.OperandKind, ) !ExtendedStructSet { var map = ExtendedStructSet.init(arena); - try map.ensureTotalCapacity(@intCast(u32, kinds.len)); + try map.ensureTotalCapacity(@as(u32, @intCast(kinds.len))); for (kinds) |kind| { 
const enumerants = kind.enumerants orelse continue; diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig index dab45350f99b..1b70023666d3 100644 --- a/tools/gen_stubs.zig +++ b/tools/gen_stubs.zig @@ -441,10 +441,10 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0)); log.debug("found section: {s}", .{sh_name}); if (mem.eql(u8, sh_name, ".dynsym")) { - dynsym_index = @intCast(u16, i); + dynsym_index = @as(u16, @intCast(i)); } const gop = try parse.sections.getOrPut(sh_name); - section_index_map[i] = @intCast(u16, gop.index); + section_index_map[i] = @as(u16, @intCast(gop.index)); } if (dynsym_index == 0) @panic("did not find the .dynsym section"); @@ -470,9 +470,9 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) for (copied_dyn_syms) |sym| { const this_section = s(sym.st_shndx); const name = try arena.dupe(u8, mem.sliceTo(dynstr[s(sym.st_name)..], 0)); - const ty = @truncate(u4, sym.st_info); - const binding = @truncate(u4, sym.st_info >> 4); - const visib = @enumFromInt(elf.STV, @truncate(u2, sym.st_other)); + const ty = @as(u4, @truncate(sym.st_info)); + const binding = @as(u4, @truncate(sym.st_info >> 4)); + const visib = @as(elf.STV, @enumFromInt(@as(u2, @truncate(sym.st_other)))); const size = s(sym.st_size); if (parse.blacklist.contains(name)) continue; diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig index e3a3e9294dd9..ef701fc86d2f 100644 --- a/tools/update-linux-headers.zig +++ b/tools/update-linux-headers.zig @@ -112,7 +112,7 @@ const DestTarget = struct { _ = self; var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, a.arch); - return @truncate(u32, hasher.final()); + return @as(u32, @truncate(hasher.final())); } pub fn eql(self: @This(), a: DestTarget, b: DestTarget, b_index: usize) bool { diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig 
index 4584a87ef758..6616d5f077ff 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -591,7 +591,7 @@ pub fn main() anyerror!void { for (all_features, 0..) |feat, i| { const llvm_name = feat.llvm_name orelse continue; - const zig_feat = @enumFromInt(Feature, i); + const zig_feat = @as(Feature, @enumFromInt(i)); const zig_name = @tagName(zig_feat); try llvm_to_zig_cpu_features.put(llvm_name, zig_name); } @@ -790,7 +790,7 @@ const Syntax = union(enum) { }; fn objSyntax(obj: *json.ObjectMap) ?Syntax { - const num_args = @intCast(u8, obj.get("NumArgs").?.integer); + const num_args = @as(u8, @intCast(obj.get("NumArgs").?.integer)); for (obj.get("!superclasses").?.array.items) |superclass_json| { const superclass = superclass_json.string; if (std.mem.eql(u8, superclass, "Joined")) { From a84a8953257ccfb70567a75017c98830eca250e3 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 23 Jun 2023 19:37:50 +0100 Subject: [PATCH 5/7] langref: update to new cast builtin syntax --- doc/langref.html.in | 124 +++++++++++++++++++++++--------------------- 1 file changed, 65 insertions(+), 59 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 0b37db19971f..1ebc737b1873 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2410,7 +2410,7 @@ var some_integers: [100]i32 = undefined; test "modify an array" { for (&some_integers, 0..) |*item, i| { - item.* = @intCast(i32, i); + item.* = @intCast(i); } try expect(some_integers[10] == 10); try expect(some_integers[99] == 99); @@ -2452,8 +2452,8 @@ var fancy_array = init: { var initial_value: [10]Point = undefined; for (&initial_value, 0..) 
|*pt, i| { pt.* = Point{ - .x = @intCast(i32, i), - .y = @intCast(i32, i) * 2, + .x = @intCast(i), + .y = @intCast(i * 2), }; } break :init initial_value; @@ -2769,7 +2769,7 @@ test "comptime pointers" { const expect = @import("std").testing.expect; test "@intFromPtr and @ptrFromInt" { - const ptr = @ptrFromInt(*i32, 0xdeadbee0); + const ptr: *i32 = @ptrFromInt(0xdeadbee0); const addr = @intFromPtr(ptr); try expect(@TypeOf(addr) == usize); try expect(addr == 0xdeadbee0); @@ -2784,7 +2784,7 @@ test "comptime @ptrFromInt" { comptime { // Zig is able to do this at compile-time, as long as // ptr is never dereferenced. - const ptr = @ptrFromInt(*i32, 0xdeadbee0); + const ptr: *i32 = @ptrFromInt(0xdeadbee0); const addr = @intFromPtr(ptr); try expect(@TypeOf(addr) == usize); try expect(addr == 0xdeadbee0); @@ -2801,7 +2801,7 @@ test "comptime @ptrFromInt" { const expect = @import("std").testing.expect; test "volatile" { - const mmio_ptr = @ptrFromInt(*volatile u8, 0x12345678); + const mmio_ptr: *volatile u8 = @ptrFromInt(0x12345678); try expect(@TypeOf(mmio_ptr) == *volatile u8); } {#code_end#} @@ -2822,7 +2822,7 @@ const expect = std.testing.expect; test "pointer casting" { const bytes align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12 }; - const u32_ptr = @ptrCast(*const u32, &bytes); + const u32_ptr: *const u32 = @ptrCast(&bytes); try expect(u32_ptr.* == 0x12121212); // Even this example is contrived - there are better ways to do the above than @@ -2831,7 +2831,7 @@ test "pointer casting" { try expect(u32_value == 0x12121212); // And even another way, the most straightforward way to do it: - try expect(@bitCast(u32, bytes) == 0x12121212); + try expect(@as(u32, @bitCast(bytes)) == 0x12121212); } test "pointer child type" { @@ -2921,7 +2921,7 @@ test "pointer alignment safety" { } fn foo(bytes: []u8) u32 { const slice4 = bytes[1..5]; - const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4)); + const int_slice = std.mem.bytesAsSlice(u32, @as([]align(4) u8, 
@alignCast(slice4))); return int_slice[0]; } {#code_end#} @@ -2942,7 +2942,7 @@ const expect = std.testing.expect; test "allowzero" { var zero: usize = 0; - var ptr = @ptrFromInt(*allowzero i32, zero); + var ptr: *allowzero i32 = @ptrFromInt(zero); try expect(@intFromPtr(ptr) == 0); } {#code_end#} @@ -3354,12 +3354,12 @@ fn doTheTest() !void { try expect(@sizeOf(Full) == 2); try expect(@sizeOf(Divided) == 2); var full = Full{ .number = 0x1234 }; - var divided = @bitCast(Divided, full); + var divided: Divided = @bitCast(full); try expect(divided.half1 == 0x34); try expect(divided.quarter3 == 0x2); try expect(divided.quarter4 == 0x1); - var ordered = @bitCast([2]u8, full); + var ordered: [2]u8 = @bitCast(full); switch (native_endian) { .Big => { try expect(ordered[0] == 0x12); @@ -4428,7 +4428,7 @@ fn getNum(u: U) u32 { // `u.a` or `u.b` and `tag` is `u`'s comptime-known tag value. inline else => |num, tag| { if (tag == .b) { - return @intFromFloat(u32, num); + return @intFromFloat(num); } return num; } @@ -4714,7 +4714,7 @@ test "for basics" { var sum2: i32 = 0; for (items, 0..) 
|_, i| { try expect(@TypeOf(i) == usize); - sum2 += @intCast(i32, i); + sum2 += @as(i32, @intCast(i)); } try expect(sum2 == 10); @@ -6363,7 +6363,7 @@ const mem = std.mem; test "cast *[1][*]const u8 to [*]const ?[*]const u8" { const window_name = [1][*]const u8{"window name"}; const x: [*]const ?[*]const u8 = &window_name; - try expect(mem.eql(u8, std.mem.sliceTo(@ptrCast([*:0]const u8, x[0].?), 0), "window name")); + try expect(mem.eql(u8, std.mem.sliceTo(@as([*:0]const u8, @ptrCast(x[0].?)), 0), "window name")); } {#code_end#} {#header_close#} @@ -6760,8 +6760,8 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 { } test "peer type resolution: *const T and ?*T" { - const a = @ptrFromInt(*const usize, 0x123456780); - const b = @ptrFromInt(?*usize, 0x123456780); + const a: *const usize = @ptrFromInt(0x123456780); + const b: ?*usize = @ptrFromInt(0x123456780); try expect(a == b); try expect(b == a); } @@ -7762,12 +7762,13 @@ test "global assembly" { at compile time.

{#header_open|@addrSpaceCast#} -
{#syntax#}@addrSpaceCast(comptime addrspace: std.builtin.AddressSpace, ptr: anytype) anytype{#endsyntax#}
+
{#syntax#}@addrSpaceCast(ptr: anytype) anytype{#endsyntax#}

- Converts a pointer from one address space to another. Depending on the current target and - address spaces, this cast may be a no-op, a complex operation, or illegal. If the cast is - legal, then the resulting pointer points to the same memory location as the pointer operand. - It is always valid to cast a pointer between the same address spaces. + Converts a pointer from one address space to another. The new address space is inferred + based on the result type. Depending on the current target and address spaces, this cast + may be a no-op, a complex operation, or illegal. If the cast is legal, then the resulting + pointer points to the same memory location as the pointer operand. It is always valid to + cast a pointer between the same address spaces.

{#header_close#} {#header_open|@addWithOverflow#} @@ -7777,10 +7778,10 @@ test "global assembly" {

{#header_close#} {#header_open|@alignCast#} -
{#syntax#}@alignCast(comptime alignment: u29, ptr: anytype) anytype{#endsyntax#}
+
{#syntax#}@alignCast(ptr: anytype) anytype{#endsyntax#}

{#syntax#}ptr{#endsyntax#} can be {#syntax#}*T{#endsyntax#}, {#syntax#}?*T{#endsyntax#}, or {#syntax#}[]T{#endsyntax#}. - It returns the same type as {#syntax#}ptr{#endsyntax#} except with the alignment adjusted to the new value. + Changes the alignment of a pointer. The alignment to use is inferred based on the result type.

A {#link|pointer alignment safety check|Incorrect Pointer Alignment#} is added to the generated code to make sure the pointer is aligned as promised.

@@ -7865,9 +7866,10 @@ comptime { {#header_close#} {#header_open|@bitCast#} -
{#syntax#}@bitCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@bitCast(value: anytype) anytype{#endsyntax#}

- Converts a value of one type to another type. + Converts a value of one type to another type. The return type is the + inferred result type.

Asserts that {#syntax#}@sizeOf(@TypeOf(value)) == @sizeOf(DestType){#endsyntax#}. @@ -8420,10 +8422,11 @@ test "main" { {#header_close#} {#header_open|@errSetCast#} -

{#syntax#}@errSetCast(comptime T: DestType, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@errSetCast(value: anytype) anytype{#endsyntax#}

- Converts an error value from one error set to another error set. Attempting to convert an error - which is not in the destination error set results in safety-protected {#link|Undefined Behavior#}. + Converts an error value from one error set to another error set. The return type is the + inferred result type. Attempting to convert an error which is not in the destination error + set results in safety-protected {#link|Undefined Behavior#}.

{#header_close#} @@ -8535,17 +8538,17 @@ test "decl access by string" { {#header_close#} {#header_open|@floatCast#} -
{#syntax#}@floatCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@floatCast(value: anytype) anytype{#endsyntax#}

Convert from one float type to another. This cast is safe, but may cause the - numeric value to lose precision. + numeric value to lose precision. The return type is the inferred result type.

{#header_close#} {#header_open|@intFromFloat#} -
{#syntax#}@intFromFloat(comptime DestType: type, float: anytype) DestType{#endsyntax#}
+
{#syntax#}@intFromFloat(float: anytype) anytype{#endsyntax#}

- Converts the integer part of a floating point number to the destination type. + Converts the integer part of a floating point number to the inferred result type.

If the integer part of the floating point number cannot fit in the destination type, @@ -8660,16 +8663,17 @@ test "@hasDecl" { {#header_close#} {#header_open|@intCast#} -

{#syntax#}@intCast(comptime DestType: type, int: anytype) DestType{#endsyntax#}
+
{#syntax#}@intCast(int: anytype) anytype{#endsyntax#}

Converts an integer to another integer while keeping the same numerical value. + The return type is the inferred result type. Attempting to convert a number which is out of range of the destination type results in safety-protected {#link|Undefined Behavior#}.

{#code_begin|test_err|test_intCast_builtin|cast truncated bits#} test "integer cast panic" { var a: u16 = 0xabcd; - var b: u8 = @intCast(u8, a); + var b: u8 = @intCast(a); _ = b; } {#code_end#} @@ -8683,9 +8687,9 @@ test "integer cast panic" { {#header_close#} {#header_open|@enumFromInt#} -
{#syntax#}@enumFromInt(comptime DestType: type, integer: anytype) DestType{#endsyntax#}
+
{#syntax#}@enumFromInt(integer: anytype) anytype{#endsyntax#}

- Converts an integer into an {#link|enum#} value. + Converts an integer into an {#link|enum#} value. The return type is the inferred result type.

Attempting to convert an integer which represents no value in the chosen enum type invokes @@ -8711,16 +8715,18 @@ test "integer cast panic" { {#header_close#} {#header_open|@floatFromInt#} -

{#syntax#}@floatFromInt(comptime DestType: type, int: anytype) DestType{#endsyntax#}
+
{#syntax#}@floatFromInt(int: anytype) anytype{#endsyntax#}

- Converts an integer to the closest floating point representation. To convert the other way, use {#link|@intFromFloat#}. This cast is always safe. + Converts an integer to the closest floating point representation. The return type is the inferred result type. + To convert the other way, use {#link|@intFromFloat#}. This cast is always safe.

{#header_close#} {#header_open|@ptrFromInt#} -
{#syntax#}@ptrFromInt(comptime DestType: type, address: usize) DestType{#endsyntax#}
+
{#syntax#}@ptrFromInt(address: usize) anytype{#endsyntax#}

- Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@intFromPtr#}. Casting an address of 0 to a destination type + Converts an integer to a {#link|pointer|Pointers#}. The return type is the inferred result type. + To convert the other way, use {#link|@intFromPtr#}. Casting an address of 0 to a destination type which in not {#link|optional|Optional Pointers#} and does not have the {#syntax#}allowzero{#endsyntax#} attribute will result in a {#link|Pointer Cast Invalid Null#} panic when runtime safety checks are enabled.

@@ -8924,9 +8930,9 @@ pub const PrefetchOptions = struct { {#header_close#} {#header_open|@ptrCast#} -
{#syntax#}@ptrCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
{#syntax#}@ptrCast(value: anytype) anytype{#endsyntax#}

- Converts a pointer of one type to a pointer of another type. + Converts a pointer of one type to a pointer of another type. The return type is the inferred result type.

{#link|Optional Pointers#} are allowed. Casting an optional pointer which is {#link|null#} @@ -9522,10 +9528,10 @@ fn List(comptime T: type) type { {#header_close#} {#header_open|@truncate#} -

{#syntax#}@truncate(comptime T: type, integer: anytype) T{#endsyntax#}
+
{#syntax#}@truncate(integer: anytype) anytype{#endsyntax#}

This function truncates bits from an integer type, resulting in a smaller - or same-sized integer type. + or same-sized integer type. The return type is the inferred result type.

This function always truncates the significant bits of the integer, regardless @@ -9540,7 +9546,7 @@ const expect = std.testing.expect; test "integer truncation" { var a: u16 = 0xabcd; - var b: u8 = @truncate(u8, a); + var b: u8 = @truncate(a); try expect(b == 0xcd); } {#code_end#} @@ -9838,7 +9844,7 @@ fn foo(x: []const u8) u8 { {#code_begin|test_err|test_comptime_invalid_cast|type 'u32' cannot represent integer value '-1'#} comptime { var value: i32 = -1; - const unsigned = @intCast(u32, value); + const unsigned: u32 = @intCast(value); _ = unsigned; } {#code_end#} @@ -9848,7 +9854,7 @@ const std = @import("std"); pub fn main() void { var value: i32 = -1; - var unsigned = @intCast(u32, value); + var unsigned: u32 = @intCast(value); std.debug.print("value: {}\n", .{unsigned}); } {#code_end#} @@ -9861,7 +9867,7 @@ pub fn main() void { {#code_begin|test_err|test_comptime_invalid_cast_truncate|type 'u8' cannot represent integer value '300'#} comptime { const spartan_count: u16 = 300; - const byte = @intCast(u8, spartan_count); + const byte: u8 = @intCast(spartan_count); _ = byte; } {#code_end#} @@ -9871,7 +9877,7 @@ const std = @import("std"); pub fn main() void { var spartan_count: u16 = 300; - const byte = @intCast(u8, spartan_count); + const byte: u8 = @intCast(spartan_count); std.debug.print("value: {}\n", .{byte}); } {#code_end#} @@ -10208,7 +10214,7 @@ const Foo = enum { }; comptime { const a: u2 = 3; - const b = @enumFromInt(Foo, a); + const b: Foo = @enumFromInt(a); _ = b; } {#code_end#} @@ -10224,7 +10230,7 @@ const Foo = enum { pub fn main() void { var a: u2 = 3; - var b = @enumFromInt(Foo, a); + var b: Foo = @enumFromInt(a); std.debug.print("value: {s}\n", .{@tagName(b)}); } {#code_end#} @@ -10242,7 +10248,7 @@ const Set2 = error{ C, }; comptime { - _ = @errSetCast(Set2, Set1.B); + _ = @as(Set2, @errSetCast(Set1.B)); } {#code_end#}

At runtime:

@@ -10261,7 +10267,7 @@ pub fn main() void { foo(Set1.B); } fn foo(set1: Set1) void { - const x = @errSetCast(Set2, set1); + const x = @as(Set2, @errSetCast(set1)); std.debug.print("value: {}\n", .{x}); } {#code_end#} @@ -10271,8 +10277,8 @@ fn foo(set1: Set1) void {

At compile-time:

{#code_begin|test_err|test_comptime_incorrect_pointer_alignment|pointer address 0x1 is not aligned to 4 bytes#} comptime { - const ptr = @ptrFromInt(*align(1) i32, 0x1); - const aligned = @alignCast(4, ptr); + const ptr: *align(1) i32 = @ptrFromInt(0x1); + const aligned: *align(4) i32 = @alignCast(ptr); _ = aligned; } {#code_end#} @@ -10286,7 +10292,7 @@ pub fn main() !void { } fn foo(bytes: []u8) u32 { const slice4 = bytes[1..5]; - const int_slice = mem.bytesAsSlice(u32, @alignCast(4, slice4)); + const int_slice = mem.bytesAsSlice(u32, @as([]align(4) u8, @alignCast(slice4))); return int_slice[0]; } {#code_end#} @@ -10387,7 +10393,7 @@ fn bar(f: *Foo) void { {#code_begin|test_err|test_comptime_invalid_null_pointer_cast|null pointer casted to type#} comptime { const opt_ptr: ?*i32 = null; - const ptr = @ptrCast(*i32, opt_ptr); + const ptr: *i32 = @ptrCast(opt_ptr); _ = ptr; } {#code_end#} @@ -10395,7 +10401,7 @@ comptime { {#code_begin|exe_err|runtime_invalid_null_pointer_cast#} pub fn main() void { var opt_ptr: ?*i32 = null; - var ptr = @ptrCast(*i32, opt_ptr); + var ptr: *i32 = @ptrCast(opt_ptr); _ = ptr; } {#code_end#} From 67997a699a4a1ee20fb189f38077a4bb29c096b3 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 24 Jun 2023 17:01:46 +0100 Subject: [PATCH 6/7] cbe: codegen int_from_ptr of slice correctly CBE was translating to access the `len` field rather than `ptr`. Air.zig specifies that this operation is valid on a slice. 
--- src/codegen/c.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 317d77602f19..fb4e619e9ebe 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5855,7 +5855,7 @@ fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (operand_ty.isSlice(mod)) { - try f.writeCValueMember(writer, operand, .{ .identifier = "len" }); + try f.writeCValueMember(writer, operand, .{ .identifier = "ptr" }); } else { try f.writeCValue(writer, operand, .Other); } From 21ac0beb436f49fe49c6982a872f2dc48e4bea5e Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 23 Jun 2023 19:55:31 +0100 Subject: [PATCH 7/7] update zig1.wasm Needed due to the breaking changes to casting builtins, which are used by the compiler when building itself. Note from Andrew: I re-ran update-zig1 on my PC and replaced this commit. Signed-off-by: Andrew Kelley --- stage1/zig1.wasm | Bin 2494123 -> 2498705 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm index e5d9258b4dc74b11cb5538e92f45ef1a88810f96..d7aa6c83a45f88bce7ce0f6702a643a1d8579914 100644 GIT binary patch delta 825469 zcmb5X2Yghw5-+^cIa};?S&VI5;jB$D)l@_1hlG$yORtbZib>;MZhB&h0RsjM2M`d7 z3B4N(tm&pD4j3?mfWZVz=p8~05P1L5*ia>DPe1GRiKPipF~(2Ux?Z9nn~OD-VLWSJ#2zv3*qhpFI(RUzncvJ&gRz#zYe7#nP!**^?t4-yqss@FqRjgbATH)NP*tjj&_%*1B!fo8J2iIh(Q9n4{rYX$kwi;7|<5`li zI`|m}w^$uYuwpU7of=Cv8aks`g7LVsb!8{xV8R$v6a{73ZI)1Dth24HR!mF`w;H9+ zsv#DOGn`qOl~-mK!x>Vqegd~L4v5{tIz+?v03bB07p1VP%L)*r=T#zEa2l(9=9n$HcN6iI9RNVSwcfYEp~@u zwXq|?sLJeU zIk4p{xSmlOo>zhil(V^0lwT@Rl zQTb%$5WYMy*w{Rf&4^kS#)C81_sJIhGV7gWscz9M`rbqN%PRe>glAtH=1Sp~G)}(m`92f7HL>LKa6&5SwH_3az`dBKr>uq$0JG_FSVdWg^a(c_dA12T>K(NTQY zAtSoVTt0KZQCKA^diSAWE>|5l53xG+oWC<&dTUo*sPUVT(XJ51sZX0fz@=*h0_RFG zsn&k^6I|&=|K#X8kkf62RCkvFLJ0<$c^cQupapM_NQ-oR%?x8uOnts}hOs=RzIrdx zSx0DxwJKssds~oUjg4pX4R>sFwlx2h*rQC%jB=h3)rMGy8Ab88*;4Ulhu~Vq*@V?R 
z>#8w3u?O#WHQ!!!C=bmUH{O-XEbd@E`|2=bil+1Bnfc|~Ud9IvH&!Ms=69DF?woV`hJsZhJtgV`awFMz8Y`1C6_l9^prB8_gSMXKY=7?y)~WcXmB0X(rsLltXS%DUUXI>&b4^X*S!z-ch1qHn(>Luz$D3?I>*@n*qVXeL0Ssl29RM@=%E?KE&w?#GQ4>;TP+C z1!5oNh;Y?5Ha2seb8!j!DXXcpICl*vF!Pr zrpNNuPh)Gd#_Y0jt65Wi zHOFX>HaBX?Z+?+{>ax->&ZSKveH+re6aQPn#q4ZPpxbsjBf@i)fmk0j>5n|gW7)XU<5@Tn(zP7!;LB_U5 zPD%qlZlAw(lhd3!bGz`@LGma$4^@vfxu!|Tp!>#(_TNxrHP$=vD>LK(o;XuB=(L%# zL2v6(%wxDZL`81jgqlYNW*@T1c%*~OzOq9n{_|{+w~)meTAyfsWj66(Hx2E6yz%{` z;cTrj^3k{sJNH4G%n=is2DvI}@3<>VNfzXqdWOj%G!63^%?<;O4*F+%?l;apn#O)I z+>fP2oy~*8I{K@s_3s|Nqpbf`+QS2)d+f_DPMHL zX!nAf?^tEL{X$Ke<%hlCuCtdWakGY@Y1*6a$ILb_L3>7-y`< z$b;Q(Nj{69-cC!?9PUuAJ5at15U8m(gz!|->Y+x@mm(7ihe99B zK8FYncbxVasEA<7AS36c$g~-Q0tmo8MeE~^L-?8`8eRrLtv-n?z*5%HWyZmm5~^$% z52SZKr$&mD$X!P0%ds(+_M+;>=kpac9ha=4SV|9UaAZ#^iGsdh^V8X!=F_671#(R&tylfQ?9-4POcoS@z+ z*QD+U(!TVfZvP3CcK}d>1<4HkuNsqHNr?G#2s$aC8+3^rpJnWPC83US9Zl_BBq?W< zME*cfe+!aGMdysjR~zvCBaO#ieTgR5DX(_2ZJ33XsDWIGF3*w^?2TD8!TOfKgXfG- zUW0dP?L;Lik&eGLAlKWWHX58zYScuQ%e8jv9wwPv$Gu8p<0r_@xm>oi`fs z!nMX5ZzQt|M*lZz^1>0uyf+%LyGGd?)%mEE279vxA2iBHd9x9py3%;<&15#w$au3R z&mUpTd9x9lWgK|3y0Wg{fH6I)^B+eTHG4EtcJv!yyxJp)uUl<=-=hYfJIa{VqY>Y} z+9>VOB<)~6I>%Qv-Qw0{^*1jA)Zg$9cVq2sVBT`qmDu&S483P0-*wA)si($=uQa~u zSv}(?T4{V)z^J|?JxEZ0Uj@VTdfDATd&7&}xf)ZKcVQr%;U@c0fza;(b-3dt=F(Jf zgKZ?&Ea-y?9WV{h4F6pBDZmbQlFU#x5A_JpRTFxFpuXXW3E@fY6EFQNTCjc78Y=Om zi{!z2Rxhah$P)of$UTdhAFxErGSW;Kwv)y6S* z8<}T_5isCTbXy6%F$PfI?28PYHG^^y%i+`*e%*4wSRYS#LJb;i^n5$A@|4l2XPmFO z2yxLE@^%wmILcW2c5K4Vb(mtk6lS7?0D?wZ4p9-o zPgFx6N*m4GjliIpMCIlgZQqHl@>@Q*D`w%%QJ;1adBynTowz!81Lj`BRh78g1odiA za|u~E(pdIRGM_Ti*#Ay%+rViU(DkLwl|PL}a$Oj+4tEu;1?o<<^N^+JAIjgjt3ak# zC1&{@W8J&4sY~{uH3OReMQu29KO-q}15pz4OSWO{mCV;<8+Cfsi~RKg-c;K9kw!Z3 ze`Fh@di}u9k3$g1Ri9MH|bT`;y z32~cN`q`0~9G$Ml(08>x*=V{m9a|f#?(B`7jfd6@RkpC?F(D#7L$`o~(-Oj*a*wlX zD_Z115`Eb3{$jjT`v*dpe^4zBq(afOnYmevfoal_jYgmYNBlB$3}wzYQ5A@x?+MH; z30H1IRZMm839H|Dh$^3yhRX`01idcy?nzGY%-ds0*C!k{a^L?T=9m#U8kzek*`NpZ 
z7}^K1e9Q`?)d#U%moE+&8_35>MMsXm)irq$?-fpsYpY1)^XB?mH{f^J{4WuR0_nnUtSky|h$pSa| zgGL%-KW-ke*SB@C)7FLQB_+o1ANRFq)7E8^MUJE06+1)Y$4}~T&t_xiCvKj3-MI2e z0x!L8gnv57R$Ph+sW%xo-kT)7|Mi#CD5{N|A&!0wme< zzrznWEb`ZW9?#eeqx6e&eA-H5>z6J0FB=T@)fn5x?O;&Pcq=2ySo~ENzG9kj|EtFQ zsGLoVXTKh3yRZiYvaW2j^jXlR+0edez^805x_;Bi_M1l%5Qk;o+~I@P8B4xR0OecQlxWF;o9D5|^dCPk}Aiiqz!@iV)P*S>GV{~9B5 zHb+Goi@#4r+&J=mV>VN~b%#Y7H8Yy=Z64!|j83*u9%OAPW#5qTXzZaIK6l3iCCNeV zj>!rp1$c_$`%O{!tS!dsfdzcmZKH4ICO&pkev3g}6h2{T{?M!lh4))&Od0+z+)9R$ zy_|wo+12@kW5$u}1YUf?$jf=eNE@+|Z~M!*IARxq{0$>_GwuBOF>)`Q#4-Iqp=v!H zud{T_Ra_7GNH4gQpO-V0H4AW($dR7AYP_qqVrj1Ab*E{mD^`zLTQF;wMu#~WhPi5UWG63S&Th;D1%Z&l5z$|@x1LSjngPOI))$c4>4nfXvT~GGR}(xo_E^_p72=4 zfRT)=Wi@4!_B$5P$P=iMvjQOp31R@^&$WjjA`mhUOj9kER;-*_>lRY7*JRs` z2BE~25-ww$FIhrK*PLdC^5vC%x{TDQ3?P4z2Yl3dqye(e{{Tqu&uPsD=#O2*r zjnV~p_`dsf)>LEr#I)FdcIV*RzyAU)DWIgy3ZRWM9{J&6-Xn=OHa{kPNl^vwIAhMg&>IiXm;Mq+A2m7R zp-K+(SF-(NSxNWQcq4OirH2UblHiP5W3fqy)~K8FjHYU6!>lh+@vK{D>wmUQKjJS3 zT*4H8Blei`a773D2xY_EDh?EU$G^}|c%Y)?JACx+sSh{$lm}?%`Wt1`)VFMh_Ca5& z$su~r?>2PiKQ^s(rD|ixz`EKAM=+*S^6~10mdBG=Jf=d{ms(<4-i!RqI^$;E<9ziR zqutWh(K||!Bh>Y=Z>k~xBnU?@t!$rr2WHOBL&k!o4LLx+FRjJPOAXty>^e7)Da9g> zVKDY+;<^Zke-s(Km1V={XyB8jhI>Udqik7lowYE`r52Nt%Ph+Q@n;$B%d)V{2+yxw z=h#E!W&cY4E0I@gY9|A)gap~O`JbZw42ZuRzi1malW0|KlyRi5@I?nqQp?YM0iJz8 zN?&RDr>zGb98w{@Ea#4wR4ys9K34-F6MZbtnM}%-2iMH;ai=DbOoj($kxT|2Fg{xm z$?s6Z)vmb%#ggc9W{^3GypW1w3lGX11qVsk5Es=)avFKiSPD*qCA=vPdc9??eyD+# z{c8iwmkne*vNFWpe+zWv#Af4}mCv+Z5-5NaX-*(S7CkKxGR@Z#LQ3*@2uZS^y6TUI z+N12D_V_grGS$awyKK~^PJgB5pb8Ia*R;S&`L#=!m0IAb7Itgy5EpC-J;u%T;q#1A z&r5MMa6-6K=K#@2A~eN~^~RG0UA<$V-loCWUoiiO(N{u5eKG#gf-&6sV7Td#x?Mwn zL36R-nV9Lo`A5reUmYgOI)q5gm92JYv+S_FrijZ{HK;RqmpMY1f6VmrLS*-k^Rc?M zlLmoNdv#c)gLgAcjoC^X{M+43!2FB+NOWqkuy>*|l#qIg|vFuba1nm(M|yx!vgMM9?CGG=aQpD^G)HdP7c zcE%E?RmP6Gro^8bIA}~^bnJ@Nm`}W(7vQ6Hi^N{rm7iKTnFY;WGsxv`OB2rCjddE% zL5kX+`osyxD%0xdVUPa@hgRI(TK8-rS|_IsZKM`QE!|*K8|tBtH^n4)*Pf#4>8|&a zY`&STt=1ZJ`O`Ob;zW3MORbnZu)${Zy$T|#phPAbPj3q5o-O&^x4y3MTbV{)(U<(_ 
zdZW%S^_5APgN;{z`7r7qhX71->NiFjrN6W!{*l{6mAtI+@HTi=g|=`+0^Fu;U-0rQ zBk@2Lqx<$vwqs<`HIp;?4?oKDn|F(=kPGs=;+7PwD(=qrZ8Ji5JYk#t3nu?tMx`Cm zMxPz9+*TOV0li5Zil$`Ni(M}>t6eT^yV3VMJM*(gjh;LI%MXWIYWh@J@rjpPQi~5CYWz~Np9p{4U7La(yQ@-gVfUvLblIP1e6vSFy6mrIY}yk^ z(9%6={M-@4_3IrzVyF?iw;7*06t#TQ_;GI>AG^j_zxP!lo4l`$@zQTSqjQI%_<*&? z3A0}EuEp~xUy>2CZ-AL$O7t=!4)V!r(E^Cqn}zJ#AIA?LG5*~Dr>&5PRio10M*L?& zercNueF8);i!$;K9JKvH1P)2Cmk6pD*@rqA?y@*Oe2vkj%q!-U=zTT2@~Zj8 zY)9;Ag)^YlK}Ce5qib89`I;SpLm2&6gk4@{yK z3QYdtW370!9A_aII2OI~<8Y8z+_KQrlhwm~gU$trNGfbqr27Sst#Ps#zd^kk&X zEC`TDqv|OqA?u#XkP~mimw43S}m;-RpQSxwb*7O z?(FkK_u1Jo(Z67Mub{L0=wi;*;=9TW{oG}0(!9+>W#NqL>gPJIt9jOa^n z2}HT_Kay(W^>}04mH$vQ>{>0`NO=*$7=1Un}iiZDkil1c1M$)x2?$)wlK zPW<*fWAV*YzIwi7QiyvYxU2E$b(_)T)>EcHy^IxiY8t_R4e{|Cz7lG0@+<#K@?*Cp zzk0WE&OQ&VvC-IYuO9z%p5%FJo}cHP+g_gWGv2=AlaDXCXN^su*M6GSQ`5@IRibkQgKpMrsjPg1d9uEhPS!mTB9!r+hr|&l zd(Z21s*I`c)>Tip5H^U}c9Q-|3*UB`qT~&V6LrGa)3$8#{nRAYwyp5HfjnEoSZzjy zUJgg0duPHUWuEBwGY_o>&m#Hc0I}sXOV_8a@oaXn37nrl;d!GnvoU`2gr`>|MFyYr zd>loQ5hp$Wi>AoLlb-LYP-Nao&!8CAmPOAmgHW|Au92L_4pn!GZVA{t>?G?649anhn0g+XUHHtWL z0EMxNhFP;jY_El?96R7aqcmjplSEKx5!tm^{fK!7|DAe~$y>~>%`Qh@Afo0KEc>s; zEM+z8GskRe}BnDhLDo)z_(fquH-c`B7X#rW!jp79OYmkLh#g{vuZ^Nk+Sq$%r^ zF@Ns>mpe*V-4P*SPF;W{QVLR`PMzjCcT`A-Q$Mv)#+*)l1C}>0MX*z!j5*4i3UcZ+ z!+28;;4ldyx814V#XS(jY);+gj?h=Grj)eT)cWGDrmU_#>nBWgf0T*3%~&%U!EZNX zwNm7I7ezPgmfw)2kc=?Nf4u8YNuy9KZN{2o_=~g6SWOBe(%4uE=ccjN6rN0D-E3ua z#cU%DU(a*R+3XM+B6l8TPw=1V(u{f0?2=qOJjP>exie@vE@!!2aDa-gU0HG%C2>Oa zChn^etbbR`H3;W-WiL?~N;lRHq35w~Y%#YLuR&p(P*{v`J=g~z`@Cz)Lm5YoQSq9JfW#vmOpV`KcAxAu-Juu`x z%hy?~!i!If7kWUYQ2aMNSZzniY3x4W(i1&;vPAJq4>pt}dcG&a)tD7~#HP1cBX*Yj zz?B_zBIMRH_lfYg*Zk=LIfHP&p{ml2 z?m60r{lVk2VY$Fh28$#MF}g6RYKmsjm}{R{axAL0u>X(gJm*J|cswdeeDpsS=NxTj z(X6h)V)Xx5VwfMGv(Ev`IUZG290X;h07SZpJ9s=QF_3|${^u+OcgSvwm%dXpj%KR(AExaVSW7v$AFL%!0T@hyRKKx>qS@E%4SuXljQyGw zx1-@-A&zV>GFL0g)SbR+9U*J$0dCXwi)r7ms92iM0vh6bJwr13@vdjbH<)UaW7CF; z)H|5O2j@a*D~o4lurSXn-?KBE=UnqV)}J*AcJ7@IyT2U&xR!_k!`V#2q-3+WFgDin 
z=p>eC#W9HI?P&~`nX@pKd;iGdm10DO{>Z-ObM_1C4Az+MG2QPT6fi;#p0b&&BcC%z zB+X)}eA689+$>NP&GAVQD(1{$iQ%`f<@H~3zI;ghI*WP2GjY!82i`s8nK+wuWy~Ya z&S94#FVdM&nEpEjiTXMOt^&`6xoo4tvvTQvUt_+E>KZHlUdSRs*Zw{R_n7spgJWE- zRU%;#>kz)>C-1QdM}HP?5g#wYEQu<57O{B@E<;iBJBu5?Qh8UDFJaGStfsv>4{7Dp zp=SCtT(&@}rBe?^>cl0?p*-XfGi9umxE4S`_1oJqweyf> zW~$KS+|)_KKp#FAOA`-iVx|_*!ox!vJ2Pal+g8YABPS_^zGk&7sG*t4HbqJ`GhWyy zDI1unskl50HuY&`QuKMhNL)QLbz_9&QWtIz;PQ4#TpbfPpj_f=JM|2z^-Z%rDNa2I zsm*(2t!tU7`)2VqO<|TEkhmIV@$*hdHq}k0|cWy_S?Y?(7= zdGH|EGpKm8h-xP3-^G$mRg-is&U3*u(WwtSBa2Qj;Ri}&D&9<;UL`vu&gAfu+3>MW zeSX1U${k}0xFJUhQN_$NV>WTf2#Ge4gV#$IQ6};trZ!}ZG#QRIrLSz(dgU6)Cc;de zGlj2Y=38U-tIN!HdZeTbH*v)#4rU6q&t(&r0Xv1%F zUZGU3DGcltN?j_HV#8h`1w1-PwhC+(O64DrEh+65MIT^l6)yi%38iKUYGbD!hU8SU zw_vPL>W*22^dBnvd6OxO6=M3sGVw}2{O||JcdzLyDTvo6AaE5#Ih__cL8-2OVr)Ju z;D^qL&dXT>&pIbQTF&~?n)JtV_Co63!{z~(eEop848?(IT>jV+m|lBWytjhs|GzWs zTfsi&MW;O-SF)B2m+t#{*g{q&>KDNFDRYRaYoijye~|#}hXPg|L16(Kmoc_KCd|DS zdGjQgPIKG!(K*};IJ<;orys;V1PgMSbZZAxaB+BHIRh(T@RrKF#{*z+;Vb%wD5M~> zg3#(E%pOz$Q@ybA0GQ&1EgxJ#$GxzuAr&zAK4rm&hKM$+VR7b-6unlnY9VWj%z4wQ z=Z*B_tj1#}v&m|U!nJH59~vqe6o4F|Y`9_T(27V*E?~(NZY*Glp6lz_V#d#T#Hs{EFz-S9wJ|zmK)bm_Hpej1qzsz~Scl2$~y|5Uc<}NNt;g z=}rm30$@jK@fFM>N(dGJN(q`_ln^Wc3Q_}RVZ~8GumBL3MJ6Q{04q|bY0^|^0U*kf z1(>xITv(F0B^hQ$tN=D3rqqcV1t}~7B+Qw=BqbIB6{+1jGh}_S5)hkPCMi||LjGjd5DNjN*3wF; zgkT{cLC4XupwL1<`7R8Sb;3eGsk=p3U6c?k1k@HIO<}MQ5SJUKpmHJL`rujpGl&!` z0X6sT-=(Bj38;wWv>Gb3VWs*mFO|j1g@EgYyJQLr0a30n^I;*NB37EMhJ}DqJNHXU zECje#s_$DRQ&#+8sq;B_Pr=vk18oaDAg$U)3!B)?rygECf{G9$L~B+O<-X zmgmU&GBZ`WP5fTUsw%j5CwBi1KS{qOtR>l0LE_Z|EH&e=HK-&fy&IN`>(&&Rf~pj6m1RjaQS^Zxq%wGj*I+m_LK+2LgW)`0mOv2b3ARG| zgefv3^dtnS%M)bEYcQ0Nw#nCmRO>&?6x3vYrZhUCA|%9m+!O^BR3!J1%*{;BeEv)^ zq>MdBx4A~2ii#42hv2ZhUWRE5kd#9#u^Kl1E{(aq!Fr^nSGPD_FdlTe10=!A#k+^F z#|X&YfD)IHlUxSjzheviZ%FgZjJZAm+lIt;7h^mFim&Jk?vU}{ETeFxg4^>f%t1CG`p4(rTrL5oo8_$rx44jRQA zG@Q==NM)s6>B&6IUSM@(Jwd?JlkBc>unsO85W6OkX+4 zqN@et9*%;$o^)b`mdAw=1=niPE%f9QpyPkA6~4lYxWDj3A3pyWY^Oz{^cZWxmWz<% z2ns~|vUu&}L&E!6 
zmVjk&1xvu;$5eg!;kiubMr@LzX@ zeL}eUe}d8cBF}SwvbBuo-t|J7Cj3gHt<`o!{&=9rGt6giL+R6 zy2sP?gCjjRZ^4bhXP1fwchD2Rn73HTjS=b@aEGNRl`i8JI1DFD3^0ow`n9WK%{>+$ zJWZ~47}Tm@-Q%Lg`QMpC+`Gp@yOWcPM(<1-xeCSB(CAb!{8Spp`*2ksaExOPuN-&n z#;~MueA6^uFox&TKFy@Z7#@B^ym6n^j5y?QhLadBob|&dbp7rSpS4z-h@$(f8QS^Y zeHPE$ToMMNS*R2+;g-|~H zQ*o6Y*X?zsFQz3eSX8m`?)>jHqPLBwH4dg-kQUsE&17gZOci&J!xt=Hs{8T;Yf5dU z{N3i@sp!{7iC=BJBMTO>cAgf#1#)<=H7ferlj2P~kLR9Q;u||})hLj++{>&z0p6IL z=z!dmNAk|?W%BljQ+D1xA{dH6MHYI&t8a*>9sDU&Z@h!2AlTsGv5~Zm%VhHHdWeSZ z=*Qd+ao)j`I|S>&y~C{Zl?ZNe;@(SqZ=FAhcbUPuty3uWPtJ~^3TlFzoHL=rdM-=X z|Jj_~f_QXrupaa@bqkuTPY{1p9fVat@iw?Ej-e-cFjpV6LFb6Brs;$Nf*up@nD(t}`s8 zlG6f1Cc|yhrDMlQriK+M>8){wU}3c(CH*ZYt*JoAC{8_ zB;}D_D<=)eAf%-8$w_A@DpGO@c9N+<{BADEz#8nt%8CYP+ec5L8vq!#vN@1Svd4pp z%F@Zlry(hc$tuw7t{yZJo1#S#G<4(sm7=GlYr(Gm=;ah%&kbEm;VJGgo}oq1pi`a% zdubL%=qeSe=wiQ^0*Vo>w9$01l_>x{Q zDD2e0M8iN62A#kJ>!%StLwKakgov+0_?tFwVTVI_bGQuTx{Y|E_@)vM79B!)geo~m zSa3LC8KFF~y5s;2MmfG^3Ywk5=k~x{g)p~=+a^4r{LzQzi=Z%G9rJ60FrLsnE{&Hw zP+gLn3>ZudgnO5&LbF0NsuHWkm{qZh{$cz*pWqk5Ah;J-C!9BbG?up~&7R41M<=PB zG3i>Y*+mI>Vg=!1+;*~|r0QTYaL16#zT$scte6?jn`C%1MVgsh$b>0hYMvcR6GK`i z_00~2ijmVk8SN&HrheM9*mWy#&@$A%$Nh(O)0 zP#B>{@Xta5^NZ<~c#QbA5)VqaLb63v$mSm#7*j;%t~U6`f0N$fl|DqYtc<1$5&bLk z$OLB^r|){m^~$a%U_ztS#jZy}+r-k!{532Wp^>~^rFN8kGIL|KOW_lkTNh77@<)Lk z8OfVtzZLW}UEf=;Vx8R`$-nB6gym(2JsoF$_|ySS(RTNv039a^fyD>iQhS*w_cMP456T38Cln8YiaYaHW3 z#fV19s1}{Zp8y1JsI;mkvVCu@p^0mh##;l2DM!)gM$(Wn;S4p)XwHi`apmclWhK|W zSV=8^tRV=f}RKA1VDTx z>X#{}KAD-kSxd|H4CEp%lPUL1)h|Tu8oXJfhR-I%ST$Xr3y7c5WM4T83B4TXnStf3 zo#oA4Kwm{4-3q{p<4@ zv9!S=#UmnWQ1eq>;n=WjsLxl^go}5b=`6e|7@+g`7U_8(vVkoG?t^t z-9!v($WyG6quAY=R~8A4c#zoHkcYA-#G!|Ab=Z@lQzM@Ck1URi>i=;+g>JZvj>?5z zYc=BCHBj<-Te|ycxlyya<0-tW!Y;G8H8f~E>H$cm72h{8O)vzvFigTkA3_nQ>{Wq0?W@Q&Txlft`p_gfVHZFj#-;XS+i9WkmY|EAKrl+1Iudr`R5 z;eJoVH{;U!54hN}Y zs%5*w{VmXQNr*)4X+6a|e6wL@Xc6>uRH>|IQKB);#W*sccoT6jjVH%em3aN~et_gE zfHQT9Hxkj!`4dUs==MG>E%#r;j-siqAbO9Y_*w&^e^XQw{hISmp>0jhlMf(kYq7UE 
zPY!~2B=;3iv>D%myEERAS*E@Yz}qr9g`#iC=wyoal+hn3+C#&aCOV;nNtAk1OEXgw zDfNcd%uG$7RHv2}LE(Ec^LUD4%Zi-iDEh99j-_bE>oPuu;;+f*Xo|inqd63PMMg(a z^ko?xNzs>NbOc2|lF@96_LkA%6n#oYhf(xJ868T|7i2VxqLDHxsy%}5ceW5U9>GQs zhqn;1KVg$)v>&3hP_Cu+%EFDy zXcp$(caZK|vGs^-!+S+Oi4zO1S@pYEgd3QcCp;tC@OoT*tfSUZtZT~SY{bxEQ6 zeMVP6oLVFeX1vyGX<=~r;JtrH2)A-z>RT8brYZ>gbO9ySE802xGKe*g@q!(TvVkJ`sx|~ zbq8aaRY{&8EecO{V0L2TEna+ri+|4=E2=!n`~QFCU-={-T{#%zqM~p-e@Ef3K81~5 zuvqvM?Xf+VpW;iI?ege}xEvyipW$(KJhX&&ZHy>-hVN-RVGJQ$Kgzo;m#Nq2YYKDU zgi@XMrE)1%?n_OeR4Gz=$wg86EWaLp%ZDlfN^^@to%v6E?iew%3$Gcn_7Z*U6K4bX zhJ)DGg;&EjO2o}BFp=^uiTtj-yKN&*6!cBP*$um=$A#Vvds&d?B4ojyd)=USeEA*E z!smGb4?m4(SKe`A(aVR5|Gva$@YBU2=4IZ-RbD(=BKdia;`Sc$^m>^)7&d{PPha6q z2QSF>eESx6GG2JeGxTkKfYEiPci-h_I2$WEzt8*D9E;CMSdkaz`vACVk%A&3*(Wp% z&>jlecH=uoygu?+nixf#PmsgM0mi4 z3m05wi8ep*pV&a5PUin%k>ZQV@ZyAvs#ACu*2?qA6n>7O^jD|xx{T+n5<_zN9M(mo zPUmksQ^=Brw>pJKdL~bYH;Qf(t26k|Ar#rMgkQAs%kw;Cv#s}bh?rLo9O@a{{xK0{c zvVkYFiVqcwu#Mb7clKj8^6Gp?u4ucFKhAIDioqLsV}2)Btlr40R`DxU5ERJ&Nn-_H zI<6vYn{Xoacdkg@gbXvLiFm~{!{LiW4 zmmOFuCWxv#d0gDsNDT`Q9`J{x!*0Mv!1di6`f34gD|Fwr^>_izVbkY-t%i2J-=2KI-TCfji=i^)0qDv2d-Y#A{`tJuZ z8cwLG5ijlHsl5L*k5L^H;qFkQm2q~tg@pyTu-p( z&3-~M-Sv%lYXE;eBoOI2@hk5sr;JXeJUn37(zyn@qt`)mP}$4OF`8P2NfkW1mhn+? 
zT$OS8(&cCwoYt)mnMfoeMV~{w4RvVVA%2!LZ1V5?nXu{s-Ur;m!qzbl55h4`(2~gY zEC#?85ptf_Nbq?s6xxc+1r<%j96JPk?iJ6o=lLKS^q##J_&$}Nn&DaaC%^6t?|%ah z!AMsbMpY_1q=?ZM;f^l4$9wS8`$X;gJfHt|MI64*$MIDwR{T$So3TBjlqvi8lzAeL zD|6^v{S`&gpYfFrvB)PC=rxI`kj8=oCLjaqPzBO?T{m>1OUNsplZrBjv3X*oRheJq zen?V~c?ZOb-u@Z3l!A|R^%GreO0QP^Lj#aN7g^Aia926>3GkL2nJg7)Km`%jci;+G zx@(}gWK$BW9eaob?t%cKx^^Yeaf*nzYk=r#SEAnOSHTXvX!E1uElWTlCRD%f%@s(V z!k@r$>#IhD>p@->l`FCa3WpG6ne-4iJ1hR7Y1URy3Db?>;G#lRH5HK{C zfVlu3wzwpd%HmY85?WQqilAA0V|7CCBrQ((^t(6V%`4)ainB`QzZ8kOi#ARroi83K z<~fz-sL@fU5>>6DMaZX)*%zulmzr!YI<(&}9y~J(Vf@dR4H!x`%};$>`d~ z;Yw?MeX{s0T!}*y3=dbb@KtXSUS4+sH|P$BH8fN>%Z5Ri!!K zwR?Ou<-fLz^x3yilYVM4eo^8s0U}I5UNw|Gs#vV4hQ)eLvA9|dWfvBU1{x}Npjh;& z4(sDYF)~y(GvsQ>@K>=|Km?H^%K(v`-aS~XK+bLEhLBORu#g!xoxO1nd zmyGhx?G&Aa$5$2bOixd`@h#EBzo+t^Z-jx!wdVf*w!0O#85nF30Pw;ysBD^Mw8L?Y5 zt_k6DcZ>9z5Ptb?GvnIb|H!z0xA?4{k`#i;y?t+e(oaAuVo5zEPBf{dJj!Pu6<^j; z+*P;WtCP@eoFF>g4t?TJIK!5j9)i@;pM z#pnDa)cVRWTuU5PU+IQRibv}!4Ulp*P{#1z2Z@;tl&%^5rqUH*UB%@VeK1`whc1NboGx!)!IyRrx6xbU73=5=Uv)Ogyw!HkvPESA4c5#ojg1M_qH zU!LqY6>N34ofZ>&D0S&j_op7phv5ftYfu`+Z~)=il4#pg$@?d8&fh)pt3h~^eE)4_ z77H(^xbJ4s^G=HVca<-xDeCu922r@}Lo72tiqc-n2ui;Ho}y7W)Eh2%PoZyRh-&XE z?&#_CS=oU56*yz2H37Id@!9)|BYGJT1l+H{8tW&x{DI=&D>sW}?<-x1qw)cUKf;s*oaL;RXWe(owjdVn$r_@pS7MajVRjb8 z@hdkf4njio@W=6^Cn0(b&zr-Qp+T&k=k!>m5@Ye6+vAir9Gkfo6O@5$idZ{A8O)}7 z9-XMHQy#-CRv3vbF!n5K5o%HRt|g@}<9w5Tkt8oN-G#l3THkUlvSTQjOe`In`59np*Sl8K@I%Hf#`laYfVq* zManF^((zndrW_0r7dI%OqHKdwnOVi94KOwxBD@emkVvH7HLDB4L( z*{DnkzReKd^`_^rzgi;8TC}qd++m#G)-q!oR>hWm}Y7-e4|$U0$VM@p%%r z-ji`(wRaM)j$h|-E4)4Z53SLvH-06_f?47bG4E%TwLo0_S((kwdd3wgjTJaVi?%EE z*#ggv?aB(PsZu#`EaCm|y2?>m@7+#bYD2%4+tT&& zicx6OkNeP<>7qFf%=Ght=*vFzT|#GK$8+nXm{hK`j`nWak}5anGijb!D7Q-Dd^rvr z3NQ)D)>%uKEx;7|Ah6W+VgQN)0p|h8KIPw8K`H<;0|9q^avUO#*MJ@iv|e2v_u%k0 zD4ouu!JGmy{HRhZybuK5y{AntIOW-RRH@EV^H46JWTTLah&DvtD1Il&CzSm58GOx! 
z+eG9s#ho_S6c%+=vM_oygk6!@S4|}a3HXvqP2MKwl5L{*F{PDy9Ae;Jzfwo+J%;IQ z>>1CEW6EqEIpM6gz9saf?9w2dvz`@sC*k1rNdW<^`e`5n8qbPCh3k}(SZN{hQ$?_C zAi9}nMcY%D76ZB^2iGO^3Bk0375_V>ROfkT#Sf>HPLX?kMV}^$%b+-PR@^wH&v8-b|q66^1OjtCQn|BKROw)C2&=o)fFiD34~$4CFG|N0m$Y=K4}=DYXTe z@&bvM`_Ov{eF*5af#}l!6a@kbee~s^Cyly=P<(S%NzT|CNLTEmJ5PK|eW^?`46`vS zlm`+Y_n~tLJpt(Rf#^H{ZUq7^`{?Hq{c@l!dbTI=oHCh3-u2-w)2en0gF5rP*l}KY zyi$&N_r_EUhn{s_RKK9OBgXl+V1tnxfui>}O3(HiZ{1E6*VGw7Q~vCQXG!e<~jlBL1qf zmBPI1N(14#ro2vx_BWIS&#-G4k6zSBgdRXUOF+&|C6fTrw_rTrhQ5fqtt9i)+eEwD zN~4UT?XYmR`0itpHBkFgnAt#mk1g_?YoNwB=)tPBxtb`SuvULW-N2(NoRU>=RHAxa zlDAr^A2~C3RM>!@wpPDn9&x(0`Y7ur>a|g0h}Yw7)F{TfcwTR-wqkW-k~qA_9JPu` zca4B=#;#dhBk`0*uSfo?nDiv>8b!(0>0(nm^(=8%++L03SZOwQQ0qCfXT!Q#j#sl- zicnxvPlHF*=UC`M|GV=@*BmkOF||ASO&(JxhA#DE;LPz1>8P%huXNiztv;sE;mV-S zYBvr?vMBAU9w)&1ZfXw}G@X8?BS=4A?5X*jn$K`Fa`1UIK9T&oc&Vnb+DiK*9A|cD zH>5+qvF8q9=(@&wB3@8yvFHhR$H^rLQ@cga@g)p>S*+0hsAd%(zo-rYzw0lmMF`fv zq!!}DCF5lkA7p|Bz3vrtSOhFz&yydiGg-6Tzrl=SUKmTZ)MWTI=K|tYK^2aC>C;UL zZO(IvU@i!@%J%>9g+ee5smRo2&9ns9j6TGk`mqnqtFoJ0)FrYrHp^^2y(* zec2CU!Z+$0qU*P6Q^f0htJV|?%=jS0S+3~zty(u|I?ik@DCGxH{;k@UeIcrSr$$%q z+7UO-AdMr*g5z5#2ELos={vPC+bX{PPOXlnnD(9eA2v%gO1H*(>VB^lFpS9K8R{6R z6<_vOzp~O$y*gOUrm+7Ib*giFB_5i=>Bl9@MV&15b=ajtveagkONV2SnD@)*qp}!P zPO&RXeIlMtTKd7e7fv6Kmwt28B;lG%G#;v^HUt8ff2eU|aMceQ1>JMws11Ucb%q7+iRdE#s1o#v#o~E20sxDRKAFPFUXxA;? 
zHPbHvJfA5Xzr#b%8MqW-)TAuhNIoc}UepTQ3dkw01fWO&+)cklAq2bpPeE{X6I^eJ zIoWE*x>P4>U9`4rU2m8}U6!LzD}(zFKMZxD#^EE>IA4v!a40}DsPSCNgBs^j+N?1i zGa^1g{tM@=K6b{x)_9)3#th_mo2vVI&DWcN)2-S9Dgt4iQVH8X^JhSC!j25+#i=9I z<{5=viY+7r5cszcUgH`55-^2u4N?~o0i-Ua43N5n(x%jTGFy@Sx8(mWbwxj+2fh8& zgF1@j`Q-<6ls&LFP_D#0)K66Bz=BJq^PIb8`DG>wV)}QHm^u<|PuDJ!tTd_1K}ZMm zHjUn}B{{snU>bF}m1)kV4)3ID2Al?O4&MCHsbUVN=@wh!bj>DrL6pEXXk4?A6EUyN z`+^bF`&&5hEOH2o=A+bD%oN>5sd0(Gb(sI#{NIAHwW95Yy!5Q12x2R;Myaob1e+Cf zEu;spmqw{C(_!3`Ickfj&UCvaM7OuJbf){LcnyfA5aV;y7ov0U)3$U0Q?osd2Mii1 zfYr6w!$xD(jah8>`k?3sa^y8j5GbxCo;Sv-je^)FF=~psk>!bQQ`HxEze!^5RD81F z{v@$|sv5`ki<48;YIUyDcn_-h*fF-UwdhuiRbwbNjbojn9hbgzqDY;l*8M-6oeO+a z#r632-o1}aHjf)ZLPA2=EN=lpK}1C1rWLjIfz?`Dt&cveg|^oIthK&2TC`D7qaqU# zH5HVoAW@MGiW(Iq3TUWQqoT%ls#K#=jSBkzp1F57i(>ox{pDk3@4fRlbLPxBXU?2C zeSwFP%^??rXX&DJ>cs+&~j&1f4i*tc)ClIG1fsr+VZ7GYtI zYPM>P&8og)aD}?O+4{M$RejKm$bLzU`esn{}kQ=}GnJ-`ESkX}wx}3vy@4 zdbQ;iYo^(FFJ)GD)CRW>eC=Prdp`T2(6wFqZxjw;!JA(iE14FV zs$l}1Q8`{G_GWr z`L?UZr?Ivew^=_qZT^?8UX+alD+q% zE6!P#cCSsOEF%0FCCobi$3q}0J>RQxWIU-`J?oyoD2d}=ks-eO>6;}nO6uLAo4C$&~w zvTrm&YaDiS{KOou$E6lA&oVVcU;1YD19!>)&rE)qNsf#sfO*i+WVsfLoj$t2y!PG& z7ZZ%pNmre<%o?70FIx->)RS*p?i;BhTUrY`4|ik^zBvpkNOt4b{K+avMK3VUH_ik1 zjU(CL)(0u&y|1i}N;yt6 zwWD$*PH!i5R1P*DyheTAVvVi(0KVg4e9G_$L?7kg%k#u3jyXP6S#3Ul zqpH2rsxs#z`+J-ou2T>PjTN@(bVi?Wn?7i+~&UA1#lWv#mHE-UHWE-DLqi1%0n z3+*|AN`}Bi@TtvRmF4QZrz`W!YZfbSrBzwn*||^_7ug(lJzY7}ctjny(yAfZ#*<*G z@R`aVn4iOso~azKDqF1qAY)vs^~*S}2emPANW=xPq?233A5r5wE0cy&vpcDXbNHQ= zgH_qxR&hC6Cj`y?q3$jqt?aDaL;d1zjsek!>dloy)VjN2>`$rs&6N|3FI4MhKxkg9 zPPhj?@2U$V*`b>6v3|_S{?G5R{+tI@_r$<@iTdF0m3vycqqWagj;4<9JzH6${(Y}C zw_qpKB_={h@lo~avjBC;s-Vq@uY!|TS6SoD+ZU^%`*bT4@8e$XO9YK^XhCh&I5VQZcwec9R%`0~YPEG-WeIQlbnV-Tb}Ka{ z2K<0pO4RSuR{8Yjg#qUhZ;=2O(~Q@<2ibysjkeF@YUN-k8sgcZP>-gqNn*Y7VnCGoCOg|7G%eCyC^(p6u97saUcBG>L3Q&| zbdGuSbb`@Xlr%;ADl8Vr{N|8gcgmLm`~{ktmK}-e9B=8MG0AS+31Fd_*5ba z#(a;aig=f&&UmSE29$d6TB~MqmWF~>0v1tZ%3+@z7~6{j7#BzySJ$t#PRxa|xA^NY 
zmOf~WXR7%RT2(PdLs)lAZQNQpQ7w4T+GDQ|piDrL18~9AcD#$}?U?9s3_ZzguDiAU zZX_wYJScd*>VpR{%v_@Gc)4<1#b0AwB5aG->O3Jhm{~n(VxW5e<;wCI4Y3Ekf7v1e zZBZmL^cbV*XhF=CGO1#U}OAoR3p05T!Y>gTG?&I+Dyd1<9 zgyiv|`su^gIHN>;_)6sn^P2VQ?uV^G=7+9gvev5i9=2wcT@u^h=o0ukOQe@wC3C!w zT($hw%25OV8k=hN$fs`osfSM=s|(w$(E}S|`$hZYORxUY$CvHu!FFq`aj818yK)r5 zqT~^)EN=j($c2%Ys0oi))y604ut(5z5gn&LVkN5SNJ2WZdLVWV<|(&GzWM|`v4mF? zzgAi0-bz3wf5QuE(<4Z?C*@miFAFohbk!eUqd$`$wLJQI(4*G=dD%Iv-j#~(mdi|b zex~kx)Ebk_e#<4MLWn5~kox3NYrkVIi6MlwF{xoBUqueh@tkcllSoQH&fY*K%D%-ZvZ0cpsm|5Ee7wYusCs=jiUs)<_hjjEk>RNWB6 z>HU8LblHQt>PD)*YL}|nqWebGm#(uW9inM-w9uo_{g`M%W`T5~K0r_X?$IO5GH98Z zgzA?Wqg8`WyNQY`m`a%BeYalm&17vn^cMq~U=~1g9-cm|WT1w~m zOr7~pSmzm^tGxBrcLJWiHjedc#oLvW)urpTMzMIkbvPp6-SyUxf<~EHjRWVP(YtEy zJC&1WeM6QMh12L$HwQhq*29-EWx&EhjlM;=SsR<)=)-H&{0zK{V!bjZe^uOREl|^An=ohaPjSSV=$hYTwd1tLc1 zj^98|RBKVexMo$11f0{6JtWY8hLFyX?vr;4Wl zi_)zCiyns_O2j0m!x~WNU@7J8Z(JgqGx*{60b3$EEpjn7B4SlBoHP>RNrG4Hf|CXc z`3==`{Lh=z#Leizjq;+gS{=37DwlaQ|HI0v)R-KE3Fez6R!X1)(%GfexpQE7b^jdT zE_AxWT1m`RZtiqwmT-YWK2?wpzhz47kPa~By#hk(5NQGf08KILdaE<@V$~3cPvg3E zNM>H-Wv0b|*@cj;ERNcx0kF|FL=Q0BmX} zGImzhxZSk^My)#IS*tvS%nI!VGIax9&?6aoYUkh!*gqu*UlIr#K86;wUddOVlwS4L z%HKZzI^F_CNaqm0mWHIXU7wU0;5U(1(AAKf<9%V{PhOyY^l_zkP$QYd03?6;^c1FG zDb=WrIcH`Va5K4tS++<@)vZ#hJ*=`dsgC@(vLe+cFWac(3DG>s)sbY9n7WeMJU!hQ zP$}d+;sll=k!ya7ezgq-Cx$g9W#-j&c%xxHrLq7yozg&Oa#wz#<&vx>Yj#lqz z{$`#33-S}6VA$-WmCG=@C13Y2d=O+x#B!ztzC)sp^b8XaXOXK(*Ve;dfiXj9Ld6*h zlDc|zYdeHv=is?dKJ^V#cRUCGsTVU<{cyG6Ijb_iK^_{0&+#8n?>}b^O63e)w!E0f z8+9S53Rgn!-6)OHoqA3~H}OF%+&N2!o$P$&S}b&j{iflXB)L~{;Vufk7$Bqqy)2pC z2PhW_E^-z%>>+CHuX*?`!_`cM8Wpn95(yUJk~1K!76G_rxZ3hO`zx*T&`y4tFs;MY z>`yBz3ftr*N0<29He8MFvc~QUYwVQ%kai5$vXrW!P$}t!^b8{qc?RJFY9Q0pWnI?b zv0YMMx9(+^E)dYjo5Qn+WH=>H&v5ljmsPW8uRQcp%g#s+Z*U2jmhDBKP;4Jm-GRf+ z3*3J6z(e#xgErE+$D51I^iIU@Ru?>xK8N!gFEfgJ> zgLG{LYlY)7&!7nW9zy8bYJpy;iuzh>_l>sK%IFh!gq-c<-(@bs9O)AcYI9sDGz42+ zf~~HE`ux~sy;nZ44vQC zKPtgw4qgCjt)9L@J0hU!!L9T+hZ#I~VP-PNf+;GCruvk)hCr9Kb37Gemq;7g1tAoM 
z5rlXgAb1p`mT-lwQjuOZhcZ@-P@F_ULlJT62WZuEEyQX=En6Ttldn-mzEO|7`e>_F zmTLIg0BW%uu#YCm+eF^nxruJ4C+BY3>iAk)ol?H@Yg9)4!a9U@I%v8kmw0j}eV3H& zs#Qy0w)RqoykM2(u*vxV;& z_4#a39%O?$SA7mvZM!PY5?$@v73ZcJJ{HbNLCMVJZ`GV-NKu?u1Er{nEs~-*-&bQ| zf{_^@u)&&w?GIT5=1Rvl>0BGY=T22k{q2+z^*B`zGFc+K@s zTz1LWrR(YDud8}rwaQp8hY=587!s?zpXK*R`#nR|_-?E6SR$Ittu16)%a5eQk6r{hCCd?h%Q&u;cQoHB&%H!Fk~K~XDd-xU z8OQB|lPSHx9&Ppx&a82?u|hBX#Z10HFY?$X?Hf!#cIBr{vN&yGQQ9uZ&sE!;aHjSn zzkeyvETx($rB}!5>}TP_yy?@hwKJ3vuPLJ%>BWDj$w;vliuIeV`d1l#t1LaqAPb_* zs;R9CDwTFcXkngeuhLKjnUGu56$x}m)%Chn>~=^C9aZZ6*R875?!kdQNpzhY0_>DR zom7>(upwKXw*F)(u1m6Y?GAblS+RH**CaWaLS33bhJ=eTadlxKRsiAUEX36N=HROG zPq{usuOEZKY8=U>1GsUY@<7=*mG`DKG`2}1&>7zKYT}z#UFvVzKcgBGgj{++7@_zT zh7lDnhQ!?egBL>yc>WJw48)8Uzk-YxgE#qM9dARM9sL{`gbX zU*EyGm|dNBtjo%#OF%W22jc63b^sYWen9_vsu|0sr%iH=XI>VDTs7~_=jo;9st z9(GS{{Pb3G9P>BMlI4!a#*L{+=#}sEew#($`c!jr-y~qhh9}xx% z-Rpj8*BAq@JI~&uv^^J$1yRm6v|ZQ2y)P|o*KM$en{92^eQ5s*1pU~stMR!I&#`k7 zj=I3G?LyYye`ZW~{zBh_AxecHaW=_27W+AZ<&jfU>M{k`UuC2Hx` zD1(Gw$kDV)@`H4zV?TiRxtbrNkF(XCG5e6gv&FCqOa#a@umdyaCB6l8Fk2Po+0*lm z(c#{S{H=bFXCGYAg^N3+(ycMpW`i-dx`{HU?#{C(IA31N&RM1UZGk;U&Ca*2{eA6^ z97W|0e4PS-hl$H_RH&Pt1WwA|?1h@2Z|}FyqW+OXm5mkV%Zbt%aqSysYxktXxXXm? 
z;PH_kqLL$m1k_LS?J>qNsy1$q%zINq?5}uB&5ql3=9`<;`vvwfswWOUThwU<_Pp@j zv(8s?KDzKi+e8W(WY17}<+OW~ z+N0b)K-CVitBjk~0fX!ljOO&BL3X}j{4ITJgr5E4UclXn~2_}rjyn5Zsq{LJqQo)+pFwJawg%GD*H%-^P_tX0ZB{L zu|w>I=2BO^Kg7OK(kp80`@{4gb>&d|Px1igyFt>%(e^;qMt}(E?1)-h?zmO0wf5Q6 zHzsNCPkp~k+6R&>P1@(vni@9D-hpd)Vz^x;j|U8gh2G{Wf0&(A)zx;5x_P)g(pe@p zmV?wu)pnJ7d$|1@uf-iKq#pAZ6NM)3wWyzsu+I_*0CSkyIl?|i9{xNsfR*$hbmBGf=&=kG-9jTc_9~jL{O@ zC@JwRB+MCP&a}SV#o~l66_4)`(X<~K&tx4l)vhj@@5bZdjf3O7yVTKB?bnSf)Rg_~ zz00q}&M&?LhPN1hJsq6a@UBu9>}R(djcUd;lB*Z)Zy#MCpiqrf zdyps-r`vm!Y&7QrOwtJU$PEAQYVLIV7lZ#E5xUlQl60_-5HfH4`j_IgQdy?HoNf;^ zzEq_L*ryV4>YM}YCvhbD?t%6P9Cw&J!~TKu-~br1r|zCrGJe2A`nTLvG&CZbk$0`y zFau28rKZia>p3Ozz)V=>b!wX=3)4ekO5?;2`^QgAhdd z2iyNN)}*%|Z2#FXSH7Z7Kh)mG?06-8+oARhZq(kHe&>5OQC^qAC$exa-lR@G%>H5S zOT%BhDZTD6o3PFiAtdc9i{O#@K{@YL(JlEEn_{G!j5uK-m^W-w7yj5jYt{|hxN}O>G8_Qeaf?O_lhf}-1UPv75{~ER$@%wUp0;1CpA5O1 ziUVtlq?C9r;sJi!`nB`NSGEQPX+@kh5$v!zKM(9~tHc7FiC+T9FPqM2< z)&~VR!{NOZWtkv8*V@>rwF8_iC2de8C)<^?@xe5Gd@%Jp(M+P=zx8S$C@QY>a$bnh zgx;K>4@5;ZV1%&2RuPM&Ee1f6E;rQj1MSL*oklL?cxO^uGEo0vp#&rlh;Z`n3Lx!L zkDYAKG#*P^r@+qP=d*qWt6YAudgW*K-sasGs{uc^r$<}sFHuMR91efGTJdwZ{~c=3 zFChOr)g`|`fZcI}dg>SUp5|RQsLy|4PcZMfL4Ej3yG+geCCu_pb>1(@v*ZTV{!6|s z)8BqYML#~(KEP;8t5fZhOyhd>>1p=sX7d{L(y#43a4~o0kld5zw9QF|kw{-X*FKb+ zx>FWiOKhF0Y)8q*H&gr^JF2 z7~WD<|0laTzXt^^IFFz%_!C(w&bALVIc4zkv+d!=TGep2eN0$<@51h{uJghJ96|9x z`Jx5+QpY-V;GgYgagyu)v+WCC8Fr4%X|zR8q)$25KFt!%ctgsbDbW=Q>+M_gJiq%w z`?;vu*qgreVmr#4UUlUq_7%oE>FU4Q+-&q~oQ)7QwoUi6xZdb;)NT+iS3#zS`+lLxU(=C z#V^M&gT691{chCzTkli#O%U`l^+6MpxkXi9Zx0SfdH?HCd%%5%q?g^Gez?fq*Sz)y zb>$*^sHtvHD;L?r66f)|+V2K?NCvPIZlDHgKK};$$jT3& znkOOnC?al(U9zm{!sF;~kEpk9puyW7Q8kP0NyF}tST5SA8FnEo&?p5ODX{zzb>?D7 z`ks~QhQ)TZdGSWIX0biQ6}Jt)V~Kisu{~(=eG-pOyNKo>q7@aLM&TH@#}d{AW3836 zi3bsM;r^9s^o{lu#`UBdQPtbk1verB?o`b@neR5K_8V<|EaSNw?WEb)q&~mVo-MR~ z*iClLuy@6QNnB_%csI~dbc_x(cfk2BAWAD~kPQ4p-v^~B0 zZdji2vf6%+eWH2SB6ZBY_K3{Mm(;y>wef~pe6O8I28UsYR6D`wHj{aUjBSFoCx_lF zSt!a$rC08?M-6+><+w?0gtu~0K#C>vAbz7#IovxIo~RkU3L@)UtPWje*X+GAmdwVa 
z!^X!}yokUyW*2h|& zd8z@IHP?Gh)!c`o|Av}zA60pbhG^`|R1qztrBV?LCbz z)2FPq)8>!ruYxwagX^3z2u1wntNULdLODN~lLJ>9)VRj}97xJtUjV+;p-x|86V+=+ z`i?bdcQy6(Sv8P=qV!oW=PiFeKZ#jz^#k^#;yyT%Ucmnb1hllNXCJVyFxNek{@Gf4 z53@L@2G{TVO5N~~y+aPAU;nWE!;zuQKnyG*E#y{+oFIr%-QwEI)WCLoRK{wM#J!q@ zu%8V=co+|>E>k~lw@<5TzlB zf_knY)Xo$3_c+>7wjNotL``3BPb>OBED!ia-3PLEF`x9rkz%zR+QxMW)$L+B*2p-<}LIZGF;SgS+p-ro9qg8{w6jr_3w!!uWUltb6$MR({@q5oL}Fs%f86#i6xx=)eIN_u9Q?Zp>L*V$RO{*lxvow9j_?Ad+*p z+f!L4uG((TGiImv`?r0oS=>rc-E4y`^VPpTwd-{(B{gwk^R9Ft){LF3=++Xmy zJ*<}0W0iX@Yj^Ho>4E+DJ(hU>oC9|= z?YJkMzr!A4826?}@3aRv#;WxGQRe|!)TlnkX*VyQpT6I99yAGdaCW|rC!YKj~`h+GiHWtvmuJRgNj>jpS8%$al5ya7(4TH7bR+r(0q zQIxdZH7>yr+6OqJYx}L4PS%>qX*02A!jZ+0nOEwJ!gjJ33HnX?C#B8^3BFK2(D|b~ zwco;urNi(piE`&Ld((l{^$2mMYq!K0Y!Mgf#haNkg%U)_e=$A2+Nm~5j_|9`^sE2q zpL0avSUKc&toq3iCs8izb-75(mI|qZb(Z6gQ;kEMVTtMTn!~&p+2$fgf+Y{!UpA>6 z>DII4a_N@jQopao7En^-%ruTj|DcABn4Atjcc?SIEORVr?yv4AQkyL?+X{K= z37%NB%D1h-lU}u&KGEy09O3+YPfgPVHD?j$=Lgh>B@Jmm2QByOBJ6Z{RwY1N9*`c;kcPM`vIZHN&|3< z24JE}@8OhITq^m?|66{V7^U_a>-;sC`%%AFm5gx`6%V(4OKazjb4Hpkw5h~6dfG0z zcGrgFa$bIaNCS@pxyLLrNrQ zA&Z}=v-V^j@S=H7NAtTE_xuJaSN^S0tihS9_6Fuq*B2AMnfr<>$X$`iU6PZ#darNf zR>Sw+WttqY_cyZ~G-;PCCrNzIW?JicEu;^zRAv{-C<#0wdX$01nDso`Dbdy z6nM)&_Hpzg;=O$+d+#TzcFMPvT{I=9?D%~(>e4^jmt}*=m2KxvcgF3m&B3a3x^t+s zS#^MuB26zoz_~nXF2|?#uXa_`@>ZyWeP?Mw=a<=qX_?w_Krz79IgRD->tjqZeehsF z3^Gk49BJKB=e#RIu>CvETytP<_(XU$(}=iEDn2$*0ZqvmYzDtFCgUp$0y6m9x+PrIHFxPft`uEk&cxoGVjq@E*Y)-odd7D;^*Em%yE0 zruyj=x6%prebsqW+}iY?u6K?n4rG5kcuj+iAEw`2zL?_8fAyw>?2Lw;QKpfh5u zySgm?vO(m^Q{HuI(_4F-XN%3h>`dR?=bT}rda#w$nsh2!aJmGWB^Ge-^t-sfE_mV; zSix_JVmtp5`6f@lAr$RZ`bo;ojr$J;Px5U}+;8g7`Rh1q>1?^v__nbj)55Rf{`OQy z_VLuXU*DLm^p|n};eOD55%*v2ujc1*e*+tpA)r5t`;Yd6eF_TdMjZiHKs%Xl{ZA*w zec_g2xf5w`XG%W`+&`t#&$3S^#QmrAQ^E0Ze+6dQu+UH9e!3r!AIJUAcVsI(F7AI8 zJV|{&qQ3r|I7+P!x+C9yNSI#ztsr#_VSxM5d^CZBHS!960OI;#Ju2>Z^j9{UT>X_D z8TYpYl@WGjLw{X|$NiN-U5CZ}2BQ6jUHCpNX6s57d@t_L3qA^DhsOOi{Z$_l_qPUB z9~}2L_g6g&ECf{_6!(|)kM?)t{zd&m_#JRaXuq)8y14&%w%Gzd?!PYGO9jL>Gw$Er 
zKiV^BtUv34asSew2^rA?;{Kb#H+h=Q`1a@AKkmQL|1>S`Kcb%s_KW-LHSh&fxwJW` z%cAtYaX;14U-gu@|3H87ed7Lg{XLr;_wNrXn-upq)@O-tZ|;QNMyzcDF;n4QasT2q zL7ZrwCi1iug+KTFOu$x6 zcsNNE>=E}rc||uXPh$Y*xoktD2|azk{wB_*qX=l6m+PMF`ME^#6;E5&TTR~1~a44jDN48uI!_}diaQHR^ ziuh80lc#Dr7vM)|t%^>hmdnQiR-PB5=Tp6W;9`5VPB$%uD&qc2f74Y6eg@IlV_A^P z0jp0x3Hr;3gne3kZC^PxQQ{NDqXf>|oW3RMepwLTfx?vu(0!-pzFoW+5fUm(QGdaQ>E;smtf<-i zVY;Hseb+9`25qtl+NA1h+=}$5Dz|(@$M0#>fSCX_U^t3 z?)7Hh73$iF?sw%@l!@;2v44*t_0UA-8Q!zu&xZb=jq+1Aqr5rZmh_Ch+`|pyId#R} zZm;pKx?qw!jbzOv_g5sflijIADgMc1_bh{r^v(OYmzgaOXpw0)KcLQ@;$D+jhwke4 zrrDcWS(cHvI(m7q6BK%pow%=ilI^9-j#uJJbo<%$U^ zhUwQ1bcdSVUCNx{9x+5~(qb`9S}e`!oXGg#cqyNLJH!30D9T%BxVT8%qCT79j{3T? zs!qPJa=1ErraLK9*+c?^?WVFNGu`^3VS5wjWUfNe+p;u@FNTc6AL|H-bH}=iRa2ci zYNDUC47grXY>$JfG3DQ#w34!HIpq`0PTydqe1vxRASxd^VIdA zeuDnRD$*v_2ytqM{)s(zm41T$CFhbhw1ucxD&$+weEp3%AWyx;K~^JpyneoV@w@Kc z?B5g~%9l^fCqFHWrMQB<+HLMHAW_J9sEUTv_Gxp?LcYC8Kx3!S1v$6lIxe-)ihB54~Fr@l8nYIM_YGH10_M_JBcv^k9JDbEk!ktJm^$2%?xoDGm`Utmf?}m&2>cQbU zxk!NI@2hok^PQ3aQT4%v9Es)D1HL`Zg$E=rrt6M$4>c<;lTZ>6_is?^6Xsk`KO5d< z=_N_7Tsx!c z*;Cx>^Ki5>6XiGqt3UqCy&?4&2Fy1k+F@jWcBM#hvU}9v=!smR`8uyx)ER#P3W>dF zde7L1D9JP-^0={oU{!i9MN{LC2tMV6&j_3It2A%b=UU!z3b%-SD==WC6U}mR?`` zdDC?dul&bP!j^KLbwj~(N@tLk#1jvjG3?^MwOA#6hFY?NzY9@}jyz&a3C5;;x{Qs4 z@Xe0RV1ag}ILD5UJj2+gq{V(b34LvXzE;pcB7a&WEot7g$ZAO;f%_z7=vPTf6&sBt zm2l}O|Hj~@OIStu4K+5CX0iV1hItLkTAtoubMjf<_dlni8X5gB^su0R#m-Dve~=?h ztrYkgPRyAwW>YTzq-QM7ar@EkzT%zJEnb_hX_>(TnRSNj%GFs_v!Dofj{9@zo1|#XC>mqh}&y z#|YJqi6$-%FebkRQDdSlnX=8%x;L&^^5&(lEPCb2R8HAgrfh_=QU*0c3JSS%05DU$ zHCp%B^)KI^TEFInja`wP@;JQ*ZF`ibkepp%?~9HT!BILR)W^8Y7wO102XjHX9|=y| zAy!}r;8s$J$69wGyA$lo3PNRE^vn?=%;bD24o^$#Nw0g6B|*7FBC))J%iNxx$e}I7Tv^3 z)1td16x{E z&?Z}72v`v$iHhlga*!&oo7ylvP$0_dCh}7QQeHQjpITR;ji4>vMxgij(uRfz10O%p zC$ep5MM79ZsM%ycXsM6ASU0Z09yC3|+~!39goZx=Lc<>b0sOk7T9*kr8t6?L2n}rj zgobvI903+&{C7Y#GE#$NJa)sxuIsty^n8&VL8v5h2=cUR2PA&U2Auz0|Yt; z3bc2XSqmL%(I=1fT-Z9(Oz}Veq>@I{m#dD<2>)4@A)zy1VLib-5#bjneB$`uxf47{ 
z2+mXPy@`h!^71Y9o8P&^Cc}?l`m#Qdqk#e2rrJ)E8*9lK^}T}w*-Jj&H=EvDF?D*a zQ--xGEUOF3CsAp1QG>G_zBfw;|0D6`l@SI}h%d}+YoOiY<>Cm%;XkyN95BRZn-&?P?>f{hZwjRBt4&J-D@A1XY)Z?H8=mNS2VgU+!VCNR#cy;DI zs%n1&Y|3OR3RS59P#pZi_oRaeL}b>5lY;vJVAR@WK3F`JB?7V!3X0+rtg5mF!7V}#11@pVOKV!%RKk%$h+(u!H56&eoX zh6+vV*HxBd{-Y8|9fDH8!vQtaBU-b%OJh)(V8j=7a97o3a;SkcWlJ_nNu9enC@DN} zHwxjwVM$I_gl#rU$$Cv)TK~)9D^C_-oS+Gke4Vfa5QKI{(;LrnkFkxnRqc814~%!z zndi9|?Dcj`pMIHzg_sG9zZRgBdGGYUW%dSd@eqMbdepG<-BXOg>f-a=eL3;=;Q4Mf z_xHVczAFdjzBu2V#LEd6xPy85y$jqSygcm!cW71fKyk{PTFY&aW1ShWAv$e=VLkQQ~O=v9%=NYFTKLuD{3rDKhWsnAGbtZyTGl*pp;&{z&+XEs+2(s z-Jw6d;zP`6(adqQNWu`wTusz`Km78#>Geqn1JS_!n^%h)&F*rCUv|lzR@W|c>xQrT zghH0L+Qj%IqBz)a&Gm_B*r`5S=#DWre5Qsg_W-%D=jX~@W5}JAHP^fAiWleRUa(Vr zc)fer-s^7F-U>;Af&%x~Ztvl^6<%FA*ZUABc&9pjk=rRK@NaOBDB=7}<8t>rMyqgz z`){LHMelOoJK+buoIm1x+!S*-dP@92D(9rA+>2xT`s67`Z>B|#<%wiANqE=Kh5D8p zma{A5lqw>*!NMG%bNz^la5k&PCEYz%e2{Nm>E1Fq$0^tJw;-!_Ik!9?L(S5h6U6!i z>`L{=R@cvXBxCK93D=l8$GlOVILExD)%}#tZM@t4Q@>-4fPzcY9;@7u>g;<0({=qS z_bgR&udDrdCg1D6v%Boah>@F|mq}W1pZn{neZ355skk#zr^!)N$U`Ehz`7i9d5j__ zQRDD~?RP-M?RdUAaJBoV!O^&;rf>P)ZT9u16KVHf79O2NYu$0iN9q07y62g4o79?z zn8jw}b?M@D?jNEPF5kq~k71&hnuMYd4X^+|JdUyj8%XP(#i%aa4y$?8zx}>+>w1@? 
z`1Io54)^l%j#~*cP@3_$Vs!w_d^_ezcewe=t?6@~bk8&n{dksH6&P9@^Mi{YdpN~T zb2#!?bCQ*$^{{J>>w8_%WD)xo$!ZS5C#%Lsay&dEengc_iVn0WSfq~J;7%^AOz6z6 zzNkVqZg7VbbNG*)F0k0^>O=5rH@Fkff`fOLjGufW#J zr{JfK<081}5gAU(RX@>MAx9wD7UdZ3yU)1QClSAZVvSf@K#!$^35MmxE)hkM5~Z<0 zGkSkdYykO`PJ%9cXT|r*gren;gj;hDz{0M=(JL43nWH-5h4%iN36p-exZCK)QY8M zZgvN!il<0$0vV%T4ks+iLL<3W#pVh$6yw-3NRzN6s7R>i#gf9u28wDnFi|QCl^_gk zk(|CC9W+nOQZU7R4X{4C%^n+i!VS7B^CY+dqo7{c>`qKA&J1)CH8kW{QU9Uv=q+1v ztCql1M`{=GO>Fu5L-o&3*+2Ian$sd8IL6A|RDOpEgj`x9Sk2QhNDx^uu7pEKyRbma z$a?G`=`dbwe2N53(^J|D^UQqHB`~1IV>cI_1ec49kF2w$E(jNUZUKZC9+6+e)Zp2(JmBN9lwzyU7N%{4H>Gy<7FL&hn zW;(rq>r}KYp$R&Y!d$uG-Ap+qQRY*#Cb4FEGn@=$^qO^srD2BAX0t0O^ILRADAIpr z8|odmujqWsC7)Wm#Z8R3MZZ{^skuFDv@P4{IzB1mA1=Z8bk>d8Hac+Cpp8x`qOYP6 zXqy14{&I-5%oFxI!Pt_ix+?<24xkHv(}IXI>7@vnfpEB_zEivuX21}a3g;_L{I-&Ae1XxxdpdK&6!SbTEC2qw z1BQV(2|A$}n}s44#Vz5#aW=mlU#x)N5w8Ej=iO4nQrA82PC1kT)rg3}nwQcT^erYM zVc0=eW0_tCpP0^~rm$2UO<#p1cn^yzR0F%*u>@rc;t&ERffM9LBm$9zOiQSP+jqJ! z0~7|#E@WmI>fLoF1*9q%mGKrK5U32&;C57gCN(@+qO7q{pPP&Y7D%?rJntnvyq?T@ z(SO`viwJ@VtNEd*9svz7^IV}1Zse0YwHOW|@nCi^{UlaE&?gxXIRq;G^Lru(6E9Mj zIL*7-K*_O%*)K4agB;010VgK)Wh`)H$19hjM1<2e9TA|?+Guk@3?Wy6N~To`uG|&s zh$Sk-a~sHn<8zW(qP>U?3`JWPscrZ8q9O1RbPmUv!7Z zb+|u?W)r*^Rn-rc^cLwZp~xd*WFDk`{-Qe~>;o5oMR`{=J~BFpZ(8KzQ!rXags7#s z0g9TAQw)e?i}kxCC34Ri+sjkT=_IH%Q%MS^n3GBJrjVNS2 zAX~&sw+wN>t1#1RYWIiE@$Iv`d$qcTH+`p_wAK8V+^W1#$|Va_>r3t^0!yhEUUG+* zlq55v2$ofY?73SNDLVKZoJJDObk_5Z)NLlIWjFP@>}44OZ#7llm|ltM2<$ZE|3&6 zF-(R#1OhCzGXeDcWd)llD3V8T(?%MSz04EoKuhM#xz>SsEAihnXtz7G^K}SkzU)?> zTMy*Z~ z-1P}aLL3~l)$*ec5((CxLb^qsiXJI&cG*cMm~tr9>W(#P)XG=gi&OeC1kvGwr8Dc~by|&PS;i_{R_mJZ;{HI*+l)kf zj9H*3^j5M^OlENxKZD%|pR3D}#L%U@@8O{|k-2LWf+1flN7?oJj@556(fy=ynG10S ztXA}r`P^?7w$Z@Pu#NgWSzm{3tmVBM)(J2e%BVr*z2=UN<{>gCzlQ%oeuhJD%bOGa zB#E{#yi{YZk{);F5sAFtBx6dxE%Kp>mwN7Y@FbJ8ll?lb$plVzYAg-sVhV@+Uan-* zGnwTtaF%iDX-eOEDszbr)C*gn(Ro7dYeA4W31KGTeodKFLi1h1?nn`A!Lv}dEP4Ac z*JUMrM$)MApOpO?^^^x!MVYfy*IB??`oWaB(2 zR*@})3qg2^&7b&MH>p$Ja0izRVlKl&bu8Ot6n#NWxv6?oN^iW$Z04-$|8h3x7Wv=K 
z=G-#5vpMyjXR~JdxOGBS1aft8Xpx;8E!X7mbMO@Lmgk^;*I{MNoHs|W(OYaS_?Sdm z@&vp$Kc$JjS$4>Ri-Wb+h(ufg6W7W#e_Kphj*ez^iR2Nn6&4Q5hE|(`s@i-2C4u=N zFoGJI&G56Xr>fS7!Y*ON1_MM=q{os;mycIZGO7-I%N?2$ZL(8aQrPwI?gEktMm=2= zqlajhBA>VgTr+elnCfEp(Z5gv!_wRUoJ9+!*?i&0`QlS0VY1Lr;hb9OW3dQoiuGkE z8a2NTx>vX>YQTUf)LyGb3@Fj0spXk$GWDY+DAy1T`yH*SWe0V%*~JM1)+x*x2tTN# zSAs8gYA;61xlQRNdW~}J>TtTCV-~vMOXnRl?6pd?!=G0aML&~BD zRwAe&CvsV$TSk(Gw~^kRDW+U567ufRTfSv1FJ-|lQ67j&WKw6M(7RWUaxAd26tHlL z)aHY6E({I!6;Xsm3-c{BOoJkHQq&VcfS^wdjm<)ckXE8bHc+%02SW5bwqIQVhXv)4rrvM(^-@x}-*s&}_sD$YhmGy_# zeR<6ROC4TEW5c^IJ1YdW=A4u?C&A|Y=OoNhG%IIVIB|?bW{wPsMl9=g;nIf!hGyfi zWbU%!-8_EG^=dNwJenv23(!khq9h!Dk>%i_Oo$*7e2gSYcuMI<`4-7!fr40xL?8jl z5Ii^X){rq8`kF*oq3q>lI}}u|_i>Zb8%+?6Xz8dv zT&JYjt7fxNb>@5S_>@z{ObcT3+BlDhRDd#_s$^bhdjO_r!HTx43q`{kMJ=Q+qs^&dN8FSQMgct&0?=60Yy*0v1VSqz9SJ%pZ_wmHJr#J~ z<0!I_48JY;t%a$#+6f#~Wr5-w$N;@>ANs!AU>M!%?;p6Y$C9-Y&%jTrwY}~^MpALJ>c__I=}$g%PdDO!9b~#G zL!SpxzyH|X&uovVJ3e+N7k6C~#<+%i_?_3Ne|?NHJx>*FclRkBbubsV5dB1uNy_+d zRL5?2k4ZJ<7HZ5s$YFTBci)nI#dCA^frEg-_qOaSo?EjIa)4f!@5sL5xjp+(ASY*p zC5`#Fd(qEPegamJ7qT#zi2l{gP4B|L1a^E;68+!vVKr=NV6i~ptq^I!>Sd|+cZq86 zj;OD_UOeR_qFfND&illz++%!%Qz~4D#*SFvR)$!KN@P9$cIv-+Z_j%vswRBm)-V^l z46bexM&_x%ed1P4Z4~rjTHQuLVbML3vbcFuQlfWuONA}WRms;4wf&Q}CK$`r_D@)) zwy4CXZrSu!`8ZTNvPS)eCWcKIFh?P9KH*7ofVJ|}7gp5Df-0zJhw6#OhM9e)TKK7Z zbgCXc!lH^T=}qWQ6AX471s6z<{fz=nUIy*71A_p)m{jmYKrcYY3ZS3rF)dCp7swMG z!1D);=xTum4K8SRDQwc|_NSO2mFAIYrK)70JbEx}$-v)+V3YsSJ{mDl-%kR2E`RIsvh;gC-hAVp^o0 z8$mSDSO=P20v0ISA}J`76?9pkl8_mFK7?<9txB9&l9XpykgKJU5b*Xw%ze6lar1k6;1mnAr7!z(Rnd%Vnk7E*}Aqqsai|mqi zO0jbAfCs10eYZR}u`rivFi?fnuz!_9fA}|P=vn#e0cgS?x>SAxeKV=w6IRfSG|_OF z96bCW78_>tsKOoYlaiUZy*(`DCX%^GT&Uy?FOI7Mi_snVeaCeR$7LdSa#5qs0(5l}l0xo{G zVT8AIM2AOAVNl4^NYca`VM#%3JXf1iP7q+#w}TSDN{Oi0j1@hZ3r`tazw8 zurwA!M~dbBm;WsJZbZ!JH}lzfjvJAHU0mM+bAA(FEm=`Zg7%sqq2o}=q(6<2)p zas^?EZt)^V65^MuGklvQ6kQj-=6{Pa5cAxbeGrWhZYMtbM16#;ClGD$Uk{hS zyp`8pQdJNY)s0(};OiFA+Vcjf*>4Z9xH{@&pK}8FPK5v135-~_?Bc-sCu&A4YKQQD zcA}~ByriQi8ZmBy)zp*wZaGNTjAF 
zc2dbWM2DP#_vfTrCLPO;H4NVWBy^ zU#NSVk<8a+@-vy_aG@4AiRi&&qTTqs_ey)>QN;w;e<_}@QBI_S9p9*c6^$X6nzD4y zs4Uk+V?S3XxUup*J4~Su&5(tiQH7p?2rrrDSzfe7o*J9fepYOp+7XMD4{Q?Q&T(|< z@^@u4N1HR%RaR^|xY}sND#`-Iqh%MxBTs#1#ST&Pcev#fSo2VUFjOH25gWK0X#@r2 zFb^l6e(c56C3Y+^9EHINF@;}|940^q6HEn+xe38+si*AN(Yg({TMPe~n&!k#KI{WM zc#8>ltc&Tv1Aaj!R$LZz0K!(~ZU;{$bI#z!)Z0#Mcsyvdh0PCJmASEl)gJk=@*%xq z)6uM$HJo&8eB>Bk4wSGe(z3pc%hK1lv2i4CyRq+UP|Z$9`8(a3;zMcyC1`d*x^T8?LG zfhSypy~?^&o$l{+wK*>~#LJE|m(l8R{xXkoJ~kdJS2OctPM(w!sHUnT^JAlE;ZOOX zSby*4yBbgs8)V5iDkC1N?5~bZ7G2#QaeAv6QfUdnUA35iC+RvTt8?SjX{#mi*wJAo zYag>GUtRU3Tcr*th#jWP!kBne4=IdY-M&68C`f2 zp|5OAviO}v4VgIj5x;wpnm-_Rq4lVZS4Bx_Y>bwe zhOdq)g#(^k8Y@s|l*R_8*ml!3vPKt%lo5iMv<4l{HVfv+GyVGdCijdg?o`)u-C|at z@J50a1zMBgKlcdUM%?@?LCByqO>|oI*IIY;G6IGcX1{9TBK5CIZ9XEG=_ITnN`8@)?PWjwABT-#}mw z1}*b>wtUtCRd#_5|9oiVgnl9fh?H2C01S&ca%kA9>yjafEVA z+~*gtY8TH5Y%}=HOfeTrx`Pm$D4!VSiRod%qB$&fM%5p%^u^Eo1GISFA6TwRMv;Jd zq|9<}TQm`yU@TQLD`I7-X3RNwPf4Cu@))AMqDov&W&-9L{VInggvL!~Z7Ant9x^S0 znAMPNB^AiJd&t_@T&=5rzUWo?KYU7{jFOn+6R$_VA z~VD?0tYVR@KW3 z_ctt=X#h&#T$gcUByCKQdL;2s3Puw9ZQWHIxB9(4-L6M5MdXIeFtX#68Ad&#e_%BG zhY<%iS_?7Q3_gNkTxDweg0?FJFbD!$c?;lDq+O#hcT_i;{iB)+%CKY;p|6Lvo^A@k zZKJeww?}GWSi9vJ{I{E|hWuie3Fbx-qr@#yF>UJ!r%Sd9 ztUXzsEWj-KZWOK)*E346&`lQhlZ2K?`>@ex{!wRNRFzb-T3rXJ$me!az1R;LqMIz! zS4eOiy0wkV;eo`o1(}>K=E8z#To5kPNGprzwD7_H`o~9Fi7*wO{~(WSp;A)r@e^8S zaO*9urw+O&-^BAg!3^>4!eMQEWUD9xF@GB`!|6fE9`5&$>G7td&`USzE--gv*jf4C zV8-MYOsc5YT9}evAQWTb8~| zD}e0Tp9{FyR`u!-SZSAZUUKx1gN6ZJ0O+>02+$<9El0Ty;TJ?k8jWl{dPGp3-#WZiYs7zf*cUGY2(#1Dpa`K}j$BfKjUq^b}i`Oeqj0 z`XF`8cXF!`cCQhGZKF|Gow(!D?f=KzpTO5uR(ss|*(cN9C(}uqq-{E#bK25*LQ0`! 
z=&3@fGAjr&Iw7=9AlG}ZiYGvUh*gS|J_Rd;K^mcjh+vAK4Olg3(G<`dwQ|D+H6lW; zqV_7#_xoFWpPiGmsQ3E+zn`~g_St(s`x(}=rf047tcSlr`t_9hHZQTcQXXL|SJ=eo z*rzC}P_0gVf|rfIIN*@HLHY;4U|!;Z@DoiGf#o(~kLVjFkRUctP7QEX7#ltAs!VU8 zenR>xL3gotV=9K@{Z{>U``b~l3tvm%8npN9|HS}>4BGvbF|-FF(;Zcd%nTy%b|4!F z%YrwWhC!<~&KEjkld*^XVJmF=Dv~TFuASnzq5X8LEsslcQi_fH*P0x zTEY4uJC2+76ze?PGRi5l(m%502cnh-v=mj`tX1)-S~4BTI16n zq7LvcDNc*prqDGV(o4tslk!MNuE;sT1GIz4n+WMD}sZ zAQd-Pqwcg5z)_y#45|S{AU)8qgY>VkBww7bTFG+ZDR;?qf9ldJ0z&wSkRgO(45189 zkpw7ZF)#}cBO)bOL+14EB;eVYz9Pt^R`dOjru)raulwb6KeJk@3Fh0d8`gpC zNp*nuX;YyD`(pDg@))P`PbSEF$yx@;E6f^5*AV@FKLNi4nA~5^S+P;KYtmPU?wM_> zxW|3uApdGc|EGie_jp&jGiUgRd7pJ(nBnISze{osw=z8md6|@Y7>4~N+vPyY&MnNy zrLQ8P)?dz1M>R8n_AYnkO#jH!P^Y6)l^|Bi@ZoFmK}&id2bG0DRHrW&Bl^9koJ;w% z*1!Lzg;s(vSwRUPC)UVxgu(ikGyOvfY^|`3zqrIKNG*Y*$qu)weNDt=jpC z883~s9p1K92SRg|uS$yh#CeJn5)L$`IuGfetHd zEwT?pAW$nEl>T}xdLF;`k4v#fTVgAT71{SbgCj8OK*2Qb74%~ZeXrI ztwQ)#a2KEBk4b`Uljp%0uHYC3e-#C;ndg6+6TB+t1Bh?XK*mo$?B>t+kL2A4=le&5 ztxgDA{oZ`YrrZ5?J{+doOVA0v)J9vA z7WqrKoVLhcG~DFp7x|0l4cyKuth+Wc>eh}k$W}79^lD4I3u&Cd#j&tMi~LoU;V16s zgZ)o&Ox+^~`;Ifm8V>Ony0vfe*M+|~HGh*jnTZ-YCK*}I%%vZ3-#WxUpAu6R`x6+z zvc>)fsN#;r{)}+YW8C$N{ORtGi?K{^adn3lpEtUXEbu3~w;k>W?)*cUpe=6wq5ecJ z+u||}Dl^{w;ZR?v=e=i%Kb=ea5-f1tu4{>b?^!N$4PXkJ(U|hl_?hNNku7~hIL&;& z$(?eTKQ`=NwHwldE8+*K@WI!3Kr=r$tT=mq_&j^KJwN<#M&0dhKHMgL{8Ajh54ojF z{mH|VasE<&K1HruiaqdK?(U^n9lG5UOR>~-yVNosZgXQG`Z}t*BHiN}j`q*?9(5l+8WMO+wv!4-VB^t{zz+BH(F}TrYgi7B?;z(bGq=O7 zUGASyX|+vs%ijzt{9!pj{I+Y#`v;Scb7kH?X`;O*!y{+V(b&j=y8H9~UC8D$SNNCE z@U9im!=tYJ&Co;K{*iC?Ps$iR?5Hh9yN#!|HnjE{uB;2vJG-~O*{`w#o*q2b@26*< zU&(ara8InXIT~Ea9PM!R$JrQ;Kh9?J1IIC&JKVL$*-#!n4rJKjCa&ULk6W_JKb4+b zyNcmF>h4(uPQ)GBy$YPzvGJ`|*O71K>+iS^>$1b$%4LQPI6~VX#N_X2u33NyxuZur z+@Hd4X1vW`7{V|R3@5u!zRmw35gKnj-am`WUB~+yO3zZO($@J^C-^_*5QC{F`Wu^| zXtY409xU{+N(BAA?k7&fiVQ^`dNO0Yqx;O0{oXSAmwSi*2kLA&%|DsTJ!kr@?()<8 zu{yNinKS*xG6(NI%|B-@b>d`zmm8ETsFSqDoic_YLhTqy{%&UibRNa;&h?LWH=XXE zMtd)x?zblIsB-6@?H}P*zSD0=K2qymI@dqSZF;BwV??|6ZbrE741WW?$)4$dChW$d 
zQ9YRBmcPqCxaGjzaJ$ciO-1cK_pbfg9fQayx8+W;kHtRx*IC6rY;eb)1M}bE)|}(F z7eP^X?tY;7M?11%^w@uPUOe{YZr=I+Q{mX}4`;eUGd<4jez!j>OS4e=+U!_pa#KkAS7opVIz2d$8 zzj|ZYtrP~-Q7$Y6>ZO0+`rhXs?mgvF?+1oYxr5$s^t$r>{z;hf-TVF98OYKLK%O7C zJ1+2-a(U(gOj8fL><9dB;&}Ma5BTr&y4;cr{gugmH@mhAspdc2<_rBP&40X^U{P5fR>v6ay! zx4h;SuSN`Qb?2=1FXhC^Cs+GBy`MU_2CV#<%YM**l=Eg@{xBBipS!<&&|k!`=Y9y% z?{a5-$bYQ)=e5#;DvgC!Dkv|yP0Vff?M+@&A(KjA$ce!a(4euS?daEE;alzYIv z^CSMhd3#*nNBrjTzoS_&3ShCwZgdLw`d-UvOCLM@@Q7&>Hl)g7c<@gckRXgwX>e9 zP2wkHC%vbIB);+Ihbu~yR^*Pn#Gg5CAg<5G9yjAIyTqSct(@(7EiKabxd$%sSG2?l z=>rsVxju=FO!v8(kNKxAyuXrrq*IN+7`=%V9vCGuEo*68WRGJt;y(W|q;{9P>tp`$ zwNKWvAGAUt0vv_p_I=Dh4`e>?QvX8~c;Zr=l25wteH_QrvVZX};LYv-;{WG}H#hL` ztbg@i=Hc7^&5E!8H>Ulm?$&?z|LP6*yUO}4wp~Wbt~sCd-`@0NEj+Q$S-*>KzVJ!^ z7^b%Wll~oCKJ_VodihUk$*x}WX>^}&eaior@M)_%@iM=f-1zUh%)gFDFI)yj{KN&H z_TS6hhdvE4J>Wj`X@4!3^V(3Ae&Tjt?oaGKx!~7%@w4u$+Hh}X2xdopJ3ET;<>!SH z2+=1(-nO5lf6~pTcY8H25>^o%BGBx1TsnEFYrhiN*yV1$%0JBQyVAeE`1^&m{sUfD zH_yj<*ooZ9KP-DwbMd?z)e=r^bM-wNukoiBw5I{f-@JqpA?7Mh((VTEn9U8Om`El) zp{b^krG%fUT!C7*?dm0E9CIgPH!OdsNSvXP6c-;iTs&j=%b(2Z^f!vdg-eyL;~GSD zzkB!^f9{x{#tcDNtl8S({=)l^<>HEykj!uYA_e%gkYDE?H0IGtOCwgaJll&;tfO;}x!c$IOVy2M*7=hfOTNbgXvJzj zH}W~4wC6{B&Yw`Vuc>-tHBQ@Sf8F$^B=Njo_?&-ep`;SUl$3T#In3l1Oe-PQ8T{sQ ze$jClnFboQ@*^lrzC^A#A+_1utalnO-o!#QWXp)ArL~;sD)+ElR0Zl^?+FA7Bek|L z+*0R=p6H!g$2&(ypW zvRTKrmlW->qVgD9&q(Q^l6_Ih-jb4}N@4_PCSj%K-NV5emFzDmIbbDa!DG6aes|1u z{`^HHlVE{b_A0~-{Kf!Q;r9eAl`Awska2fj=jRrb6yvkYMDO#n(2{G5If?{<-}-VMeIvj z;9sW9jlBUKsos6y27gKeh6=?x%NHaV^(39U!OxthgG(ZEwYYT*E3%1{r9Zfg$+u8- zdRX#^=yVGv%Q3hxJ%o@4mBO}az{t|y6L*Tfaj)Otk8S4QHX_y%3h1&RN2|^)+~ALW zXIb=0D-Fdz+AG*k_yHIV)fJysL{CB9lCqW2Qzmq{tlPH1A9s56rb-cYj0vJl3Z;QN z{7-kF-^^r&>}Lu#Kp{%Ll7!(Z5+) z0v9{~;$+7|-JK3e2v3`L6HB4T+-Gj`e_ZI`erV(ZGYXZ+3ZiR zAJtcP>t>d1kGX&U5>n`P_w_GfcevdxzQv!-8uR>H{P#2T@806iq{z#+_!n_G^H%?$ zIiqSR4o~<_1RJ>exeB6M^E*hie8VZZx4PFeL1n?K%Qw8?zef8Bxi!nbTrM|(lcGAE zNh=Nx$Q7#dLmP{CS&og1Z-}MX8{cpc)g$pooI~|${Dxzw+Tu6Vy*9q#5UP&&hGVGO 
z;~P$)S{L1LN*AY4t&d-E2vu)bY;~S2kx--7v1gf3!4F^zl#W$QkwIROY^r@cs zCSCdsXRd75n?jNugODgOzMJBbHKpxx;?(dkICE#X6CAlS3`tJh*&0_sW}D$2ao)}Y z@f(iYv39LlQhO~O1}mrS40o0^g~J16YflUmup2KUI@|Z%E`FEccUZtsci!?$Dlsc^U5?$6=`nIGKluUb~4{XPl< zhGxT{A{KLGSB;)1Hy0rsCnFkb^6NaAuXC7|Kd5_m(jESy6U75Lkb$6Cay*3jJk4SZ z0g`Zp^6a;UiLI_?>zB%uTSNitgXnEJKfToznTl!{ZKeC+9sYu{`b^EnJN=uREHMq^ zo0hkbNw<yubHOfBHm?J1d!o&732Xuf_AB{>vg>=U%?k|KvDUFFWN%V;@2fyp{oF zn<-?ZI5`elFNkx`beT(g}WK*(Y-ou>BQPy<4MD)s9X zzADL)Sf(6_^Y8Lc%URSP5ru0Bl49S$ z4)Pk=o=h)L>OiebdlDCu%B2>Go-g~j{5mXbJ+@0DP_l(e{@DIJ*1Z*(T67T}`1u-) zjUF?+Jh_|~cyHv;sm#=HoEC{XG{fv`W^=NBrBo_Y#!Q=Giv8ItjK;`LOKF%=yKQ?; z7`zgs|H@KP!Iyg~a&Akfzi9IBC41O&Qd(G_U`nC1`%|Yst;JA_Hfz)-^+F2c($~3p zU-2g&8s-xN<15J`cr%reOAR%~+(Z>1Ds|!3ug+Qj6n3K3z|rpVulP&eY;DWMUeZy1 z%cozg>yb+A-qJs{P)eq+2fio=8FtvRi*rq`-u>w-{t*+cBKgcmR)h~f{RemA-F|CB zMfwLtSAl4jy{rFzw|~M)`w*w~s1I>zr(X%jqNHeQ-XMu+ugVb*wO?hm_xOt|>e;^h zkXv~VF0(N6T5Qq$ z`hf7lkY8X-iY9aUHh*$caWXr6Y@GjTLGhjLl5PH?(+p5-q#0RHUVC+o#Emi`dRIddcvDbm7q}`}{&@Y|Uu3LeZtvT8i1kc=jpFeI=h;l{p z;(ZqhS_Xbgxap$w>uxti8i5~CUS2pjSU|4Q{q;U1LY+JBL4VGn3UUPz)89=hU_JV! z1x&swj1|Ow5i#vbe$o~6(@J+;mw(haf_Wf_TvPsO#GZsl5=iHFIrqyhfA$pNmo~eW z;}0o^+%42jY;F+$!2eXc`Cs+lTlhlH9h!U$ zE4EntBajG8?&T9JmnN{$xnF#ZR6mt&-`D)5`~tC!*x7{)9j=N{N?V} z-F{ur1^Xl4NcwSi*8~2XYDU7oUA8p8TJ82cfC;+7r5^OB)T0ZgpF-xb0!pM`vv=d` zjIt^^j_}1>Fd6qROaeCw)?kw zIrrv=_7|!{afAVd=sNn@_%0I{^YmteqdNn_u%b%s{6Pef1j7U`mb))W7w^? z{MB9l7{;fz?#CbVKkp^izv9l^38{x%DvaMtU+!+&X$<$7K0G)t?(}`{a`(rb{;_p? 
zAPoknFuyXqeXG8M?|ZGg=sW&Ln>XLFQAc97D05Z=G857MymK#o2d~2KUGBU7o4H)@ zUH@J1j7PtVyKJo+{H}k5Zw)sf73Y30Zn8}GS9}kEUFH7cd;a-sn>(x*%V);z>E+97 zT-oFP(#h?%6ZlmLM#b%8|C>o{3K-qS!sTvv*5iJ*l`Q32sGgkh;ez2t&P1L}3h5!Y z@o}P7KI3*h?qBasb?1KHZy7TH^NMWXG8BM_&$?^A?;kSmXz39wfGju`eFmvG9!$R0 zJ@tJJue<68*bK_P0ky5!_ny#)!|kSwF&*_$T+VADOVSho5)9`w^^nuS@s&?=6Ee-N*a< z83hAfU3vhNTi|t(95{5Teal-}TgPD0pLY@3Cq_k{ZFmV>1N9RCzm!9-5^9q-HEG4C1nU0kp)nVNX9(k-4OaCPkWo&CKBOxMx%E~-X z7o}eCGJMnKbuzjMi=lX@Oo5$m6KO5Hs{7a?SCTKzT4gS zwEy7=ox3@@RQXquT7eV$^f4_$yi4=FyWQj;`==%ccXwa%V}yTc$;Qzo@Bc|yvhqJ5 z#bo=F-7ETWIM3>QGOir9Ra*J*3x;*ov+fr^^_Mnx$g}Z%OciW1VP%}r+WQ(e_hX-bpCT?WTmHX=G+C*#q=IG`q-E4fw zz4j8ecnr*>@i-%wfT5~1rQD$}gTm`y>AwGE|EP-8=~z%%M{$5d_g`M|KT+jR_^$eEFN*N7V%H@G5V9h*z^!SCiXX8|=iLesNuJaCm8P->3_2NZ$72?vv_+4<_e+ zelz=fE^n@GL}|--kk5krGhZC_XkPxs?Q1FJe%%lp;jQnU))?F)w^!eo;9X0vugMk9 z`>hHCTxFd|$B%2PLgf;2V1;h)`VlsM`g-@irr@Hc;ex)@DLRq9zWdpx;2iJtE4`!_ z?((Bl91|v_ z+{KTjTNE>Mt9D%U}6O^$!&qVZ(-2tHcbm=^IgrNV1fJnv|zkfdy@fF!ri+VnM=+)QVqM5-N_0RJ>(ySBtJ+He>GlS#HyWm>x)ANHC_xjA> zxcZ^eGJ~(X(`E&S9jL-vX9dlTonlla4Rw9aeRo!H8v66Z*})VpN6Ze6Jd~h*c3EyF zD#3!T!^|VB{qUYE&5);q9*rd{EQ1skeZrsV?!MWcQ>lE}PHx_8-k*Z&eTBVA?@~HKIJ`t2pzo_0qo>`)NN#n=HG0oDnytgt8V6q{me=ED9=Ca)OVnqERkmgtOg2{7P z4O>*k8p2Z(=JY6LmN)=jE}S2H^pyS4$SmLr$Pb-w5Cy`*)=JD%N0NP#p)rj4LF7aAAS>dKD4nTwkpM7JzjxXOC!^g7ZFuL$I1p$1MydaqNOc;#4VB4R!i-mkT+x z%a*T5K;B_cSfD@?^3xn}I0|M3Wz6nA9pJHCQ!4fx(I-po`F6F^H&ms@-XC3YJwrP>~G6(|rcq z7vB_|v+S5I@a0EPW)P$6m%7K+_YkzOwvQ9#qTTgjr!} zVjByn3~4GlbS0a^z3zcSf+J=VOS&yr^C`}eT!EG8vMc$WwaBGJ-h-ORa=A^WwAyTq zYh4^H$;uUnCE~x$_OQXr87^+}-o~miu`O4Zt#y|y4$hmz+Rm~{C(|#L<@{~A+Ng!v z%dVuRmlg-p%4w*^wHzAc=G0)Hht~9enyb#SST2^YQ+bVUz(bwR)O5f1&|tAwe`j5? 
zB9zEoIAAN>yN{jH>iU9Yt=pFgsw9tovLrZOPULBa1)s!`+i_U1IKyFM8WZYgg30yr zg^$@xbNY@L$EJt6|9Du?irV#%YgigI%N0FyDbny^ckOuDRvz`IEm`c3x}!q#5?3UP-EpTK8RQz-M<6ni z_JurU5bNA$jttJ2$OeQ8bFHx7Tnu8|Rn~p4O`)lGQkadQ&VvIR=4lyih~rjdrWn8hx^b*{}8u} zC`xZy_b>9nLa%UJdD1UbXhTyM)>KnY!7R9-m!D=)0^kas7jbNcnh-mb3Si8x3^J)I@yc9X zruOt)3NB1S#@Y-&6tn7YI|JOS*#i34;jk$eyN!4>rz&8*q*~dkQ3Wi$Zh0v6=muz9 z;fCIf2*O>GS&_gorIfKFSr_^WEF2F~*gLWlBlSiu0j!+!!H;eR$yMj zr&xqh9v+mdl(MGUGj+qP5vG(Y)MqM3@yks8Wopn$!^gJe>Z4EUFT0Y@<6C2{6b3`Y zhna26`>0i!%2g8>)Rt;wFm~EQlQX$+fV>0{n(;K1<1h#=jD7Z8uC)r2ODLS_5H58~ z%EmFOE-}dJo0cYLhT@~oEjlI`3l~~-47>GjavwYjqe1Uhqo3t{RyjbB`(evkl)&lQBTja3EFu`FLwMi|R zSO%{aZ_x(2In*)aKDshE>}@?-|0ES3MWoaier+GKI=>+O2{e;(*-Co6-0b1abIHsc z(if;$d!v+wo-wO4v$c7SHrKm9tPJK{)ZUn#%^OVTgi|Fmvn}1h{!M(fjYJj;v}caR zUXL|~Ps7WK!x71sl#e-*l~Q&8({-YgK|*?we$G9=Dp*hxJp=)HGUcs7&YpmVVVM)&8XUtOh#TG-oKom% zAh~Nkkry!dOy^~b$KoB$W7aI*l;?476mKf?Y~d>2P;mH$^zp+t<#|+$Vln(b!#8+3 ziZ_KszM`}o+uMq-z{`;zm2@Fl`VF}kOTQ^;H97K4Nd;uFwr>j3jX;)s2nY3S6dZ~r z4Jf1f8V^H#EvbFP&;{EV4kG}`C`c<>1ks40d!^M;`*5*j>6;M%A2HhE^w}!ljmHQ4 zM|5e#bdDI)h+&KX%7`9~=FlxRCC7&oU_LK=NnaeHDIS74^8YP7dBem(;@P#V#c?b=z{YGv2lpnXA{bnVO@VGPC)enZsRwLq=Wh zx6Tg4U9QP!C$ZeaVY_kfdtBWq!I7uGsPmxUfP<=x0R17mW(YkYgp>6ZXELgUm>;X` zOEWkDA2A6L%8$6u3OX)Z9ktWrZaXEIe!MbwW;1?e0Ks`3*_)~jFi9zf15Px;l zNd##Ui|DS$HJ%z=n%vvq+D{E;G`5jRkzg4V;t6XcC76pxPYotdDnd#&jl>!jpNTc; zU&Yh)>Z!rFnrK$6OD^;FVCiZ$CY_WYz|YEQcWzYf4Ml92?gV0FLtqTmoQaYjQQ)K&LME(&@px zu$Smt2ka#YHrBf%-igB9N#|vY7MhdWyoOkDAWx5grpV$!m>q(i1uHc8JwT) zZ%i)7;oDX&l{X3co$xN!q%XJ+zbiPI`ggpGzCP#nyepVp-OoTmwE3-TI19_?cDLxP zV0?=;KOs`zk)KRhWBLVm{#n8DB`*p>IIahEMO7ORjFR~Qt7_2x4)YtZ5ltGUd!^fS zRxqc&D{k;by>UN1E0|Rwa`~M*`s`rbDt*a0s#@phLp5Ears0^Jv%y`Xj#z;_5q7ep+mpA)=wpy}d^Vv8@iIp<@w|CKxD z{9t+IW)0kPlg*nIf12W>njl zlHtmN)r3bf#)b=m6*F&3dS%)vfa|DyS^`?b11?upw3sg2cJ&rQfkEyMUJxuCw|sRj ziJi;pv2do){GMtsk#ntstcE-*+Ea6J*(F zK>C?#VI-d!u&h^CM3sj5hDks+T&X^&2|t)@DTu3-Y=Ml!Ht(e110rSA&$Ta1!3T^- zrr=vheF|qWO-yxk>*@Iaqq5 
z&+ub_5xZ_6eCB`DLEyQIu=V^C=1;UOYaCGBVi3fH!z&0REVr&DSB8la#GRLDZ?VxZ zX+tgQGwJnO3P&Y(+KmmMhoAY*W-5Zr)uk45u+rYvX6lJX+R4FFgUu@00yj4J4mR7g zB_(l3;%^vpfGWxyk6o>eTKTJ^8ByZILdL6>^|aUzR^h zI74hy5+|h1Twe@R07vZyB<&$!aSTVMM0J42#th4O(8?iKHS-IaFU5sLxO6$Vpa?rL zk(tR?%TW8d?$z<;MH~q;HktAaf5F+SFy74Pz$RvGPelq=Plz`kDwMQpd6Mws1eU{g}NE}s0ZCIIW$PI<9vG~50 z|@fbPPL--<|K}xews37azlon0;ZG4HGY1Yv8iT@Vvc_Aa zpS|YrpRY+&@UowdhB?)+@_5h{fRA-z3tck)?_L3MO8SRpkfl>mL5fWtQG( zlJ0p*$?VB$OMQX07C6x`%;N}av`<1Pd;|2Gml(of9ii=DMGg_hQ6#{L@KAAg<$Z+L z6ScjO{@}B&ES*5;)o7=!@>tEG1fWCoIOlrHk|?HN!3QJC6tQEIl`-MfF}j|p%pqkRR!GyL$$e2>gaQXx zju?k0ax`0;Z>EzuwPjf5etk(Wxv;+4x^_U)-&8%C^z$7Q5AhCphv=(TN@#%2W8*y{ zd-i95fj=ZL%TXStr@Rst7rHY%L4VP|koZ zBovvRX_c=2D5tBd*&bRgD%+a2naQ1;v8bW+)=0-=?6t##d<_(`e68Xf<)}Ct^n;Ju z!LarG&i7MVbGp8^C}8*0mST)1Qy=v6!GJy(|8_XxDMg zsyPCo&HDP%&orAi6)Cq){jk~G6t!P6nVanG+!8@elO5K;oRS&VEGGqb2on2=d*^Sz|SWJDU`1W#H~%IEv69>G#y zsQ-ViT#?Y4VuAN`6UvYXahq3+?m=0Ism zC_MoPs&uLq@acf9{m%m&qcBZAUdGZfHfxC5Io2ovot=$NM}9s%Avx6S zuKGlft$(puiAXY_JH-c^-G84kW=sVb+tcFCePz<*X2`9j=mItqjE21NlNhuzZo(&n zS;sv+68!mH+)seq>7od0cqX*c&`f|XWyP)1Cu_s-C@>TJQ*I*#ih>D!mIVZ8PuN1x z=-hmg1^onDJSZ$iJ8BV%Qz*LLz}Gdl=)(q-_M~%ctYV26JH{?y85B-fr}fX4HFT}F zAhO_x(Gm2MuS$P{ia}uh_BP9z`!usISoN|b>JjxoZc2Nr0xXU&Fr^<%4Hp?|%6p=e z{1BMnshDOmH?fET;`&UbP5)qXp*a5&qxm1wB8)Pm#cek02GifAKBHTe^j*rhy3RH3 z`b$4=Pt*VCB$7$$X>l0oA4h96o{kvHdo-9o>q9c@+RChU5=3lcPk%jCd=h^&Bplbs zzOaXH;@x;u3-@h_aIXbp-lTAm7%3LB3c_^pi0$wwj#UEF9}uQb7Vqn_MXMqFM2cjh z#p^`Qa?r>p?7w~$rieX?(fkHrbGC#yo^S#?Si8;fVu&G}cZvaLwJsJQ(JFL@C>4|4 z*0%gg0B!h1zp2)E9psVUL!V)HV0QjMNDtZw>BUslAZ*Am9!Ov(u%3s=&n#u87MW#^ zV*Ljqi1iWQ0Wn4qgxJF{K_o{IgyNY*5NJ7?@c#or3<`}2z&~@TQAK*h3wxlxAYBk$^;3r3 zr|HWh(o;3P%@JKel|{N5Y#yd7ksx$s2-*q63{5jdvg#69Q3NE5$!bU=*OFxz0u_fA zlKp}JOM@5!x9kJen~LPos;(Mc0gafhcq$SY0g?ywU3@Z7t0C+geN%+dV+DOQX1Fo> zoPUFlVyOLxz6plEK|Qf=y2G7-wI!Vs(-2?F|Chdr>6a47!Dcw~d-wt8DXVFH11ry@ z;xr&xtJB|%JnS}l&>rk1>`uZN&a|!>#XHK85i@?|kjYn|9^*M=Nw+{j@Vxx*F@9Kv zgfM@>c3KJv6jsa=*ne?tt+{S%VI>W1j+E5G^S&bEk2(4T!_IcY94S&ow%-TakGWoJ 
zDA`pvQhhW}?-X#w=t3bDC3Pl|@W4G&vTv2DJ|MR@O|^ zs6$4y9~Kes_;>M0J_1Bgj+n&lnm?|w6rN~!govcfc9n@SX**mjo>=iq+`uDVig+}? z`N^tdgyI-=oE*VoSp~QlMTb$XM0>0hxtlC4qMCX*64L;$^;?9{Y%?X*=dLTRO6MmQ zaYj-v9!1ROB=D+8-7{h=JN04A=n{;}hCb>e`z9XZllo?INn?hfbk%fNwqz;=d?ppq zwpe1OmADS`7n_AN1OT zIOGu{rM)smL;Vyl8)(V2@YS@{`NJ&QIh0Qto<^Z_l)8ih!~zieVM~cdzPeM-U^QYu zAhUbNHNmWRym~*oeXRxIhwwLjtA+(@#@eN*1fAZ_9-K7e1KeV)))#9t)JaXeG=~_B zmeuNZUlWWe)P?<4t)0eWQ9Pm4XwlQm26$LasdP343bZ|7_20gSZI#W@ny05F1aohT zO^X<0oji;a!J&*M4~yG76sy;;Yc zPL%6db>JjWt{FxF3op}U0^nfX`0Zd^0s4nqg{vStHxHdq4tMx#t1wn|=qP{$b|?AW zwbO<++3}^?JT)jLOe&g4O`oiXN)=I7GkF-R+lnes8$+y4zpCJLEkr+T3W-)#tiYS) zEHwH!u!74G09E-7R8W-CAxTSTT`%DcKNgV+HU1&_J(2}S^4kn+;leFs!G!CWp6VY{ zt2OOEEx!kf^1FzqFfMpzBpspr57Dk(VZ(;uyZi`yzweMyN&-<*D)mpS9_u1Q7v7J$sX+ZIKs@Bw1lqIH4 z^(;aod>6H>q4kFiO9g=1!m$-w?jP>)!2T@{6#EtA0h4K7gZq!72&X|SjjwqaPD3T7 zSI~4t`nm{IL&d_y!Q37i+@ve-E&0l3;tn(Ly;|pP{#?*n*lTuI+KCLiGe%8xWF{Q_c#b}Ss^`)k1bF_C- z0QWng#9tTR`{_GFc*riZOAo?&VAHL= z@_}-{Q&+h4Bf3gKs&`*Ktf8g+?bM**Uz*1oKw{P8t6hijM(bL(-C7?D6`XsU-DsW zs!|X%>|=Nxj#i>g*P)}|-LpQ(oCv2$fT)VM>G}(dZ&~_v!lBA*#2Ypf5#;kH4(2YSn*ohkarO;L(j^-JPDk&ktAtNqxtnRh9McP`M zE)9WF0YWX}=(W8@HSKA7wjO0gl-NcfE5!bB2$Q&?QohYv8V&dBP;P{q8om%_nND6e^ z4=pQ=qELaUT@*E}AdH}>T*1n9q(@qRW0De=iAXB>H9rnPkQM=?V4B@WssIn>CB6?a>H1{)W1@}T zv;xeCnN8^$AE$Ez`;p_BOomb6$ct%U*(YKe6Nja$o`_r`03vp-)d_pouA@Yd-`;o7GwB8n|Ea9^uJdwvvM z7Qn38^*V-8(F zv6QRud;M+xVoY|uZ6s+M$zKXEg!PRgN8>@8qJEYVtGzvFq*C9pf4PogITpqCwn)?u zG?vn&_?Q@HSEG^jO4{#iTvEpND)5c(&;<20@ofBz6e^^Mm;RrWG1|rv0a4az@o9J6 z7lTZJ+agncQM%rdk*-JYjf3>iQlDDVE*=4bofMJ4+0$4maQGk=7tCwFP11D1Ka3h5 zEQ|dNmMH?Jz?1hC1O1eWkVb|JndVof=s2@IuXqBx*#RQuEKm+>6hPQN1Zum5K;_)^ z8-gZx*XAJq7ECz@94;oK*mJLC6Bxz=j8S0Juy0>|!iakDp<|ld(O(KC&BZGIzy0jn zO*L*`XJeJ?TGo)7i_7SLTLJAZEE;=_pXKVJ4x7o>I-Ij4UyK2g7`FSuy=3=Ajg#-J z9Xn}5XrHr4{!D|}8{6c+2)%V!Yt#2xY(#HMbRG~h+t-pyPRPQYiS{Z!<^eb|Gr6mU zROQ2Nkgn#{*&3N~wKF}ls*!~`@h`Z%8~BCeBb&4r<(X!sD~=LPVCG>wIW|CTQe7Rg zyK&uwiH2S3@PUqIH~ChSrQ4ec+!dNqv`gk89M4;I#kSa~tC{O~`G9Wd9rilK^vG=r 
zACQ*~tkLhtz)d2c(FX3Fgnme31u}5cItxXfn1S0>ez~>bSV$}{t2T=|i%%;1`$p{~ zupnYzaKNjpv5s{O9l?Z2vTX;$w(UA@V!L+QoqTdb(j1hZn2O(m^M(kbzs25ld8?m*)v|2`N@_AsymW^km5nFvx zkd*9m{i+u4YZMit#V<(8!XO#Dru$e&52)BcI$8xmfn>YyI_xh#~uL>wa*i1|4HN;9U2aP#Puzj0( z-liU&5u4`wakzk>)$HLMlU}I3?9lwoeTRbrWE zrJ&{8BJ( zc{S4+J*%v?(&ZTHtF#NZXg0;=D6?t!mV6d=28_G_#^fRvC!Q?k^+rq??&?dhL*z{g z&mC7;W)*TeLogw?KK6cx3CW7iZx|&u9XY?H5{If$cg*A^bq<9<#v~zAE8Px?1DcNe@i09p0y@Del9v#Vsd2SoApL zVoJHPO^4VZbkfle;anojH&>M^k?E02AJwJK;VyBo1li8mof}#e0Y_x#r+2KNtJv?T!6;uXiR5sMqknWM2S_h-^PSB?hUb1%w(WsxY~RrVjTiu1igVmmHe!GxyiSoqK1$i zvM=`X_YLT*gWa}_KamNv(3d%IUSg9^dJw7{XfG72>T-y`0Q$r;;HBiscI8O2g~`3$Ybaz>xae?Y9~lBYIx-kaSC$NQ_jbet=r2<7u3z5mMC_;Ly?>^RIdr^M7$V z>p*%-4f@9jWV01r$5?P^1s^R2orx(yM?-?uq1H zf0Ui>f9apFHvT#59O6~(GA(dpt6Og(G`lg0u!E-IAjN+4Tr<3BozYHIxfU8i%{LBU zFdDVBK#4!^AMicM^MqaW9L3iagc4hMl#JuFik`CBBST z43rpQMJubRHhV`wn$$@2mda7e;Myd@lw{X0#tBGq4j&9v9#gI$ub>2>fRhTF%wOU0 zD*17;k8is={oe*d-hqwl12Z?Z7CBduUPykgu0dj!N?#d?3EKDP@K+ND2%}EtwqBO(={R zAfU%QpmKRq_t?rcZ%}oX6z2y6Uj%E&o znpq>6PR8{HB%^M(7eWvC|AAzTrT-mUHNY~G;Tn_7V^4wj*%1^o+{wR1F~8J@`%w(* z6pNhXR5X{LR5OP7-biJG7a1!TS}Rg9-Z{r{DzZ~5a@fSZ9E$Q=?#Jwf*@}$|H@6w9 z*KpYD*lYqqLeGG!kbR*YHVkEj8{&{QPy`fGAhww-%Gnt#vyU+z^$0PM{w+T&{XpD; z(hnG<2BYqnnI1vGVm}fT>|?Ux^x{BrqJRaS6*&#X(u)ocHk+Ub^%103xshN|#R=5` zNibSr6LQEsi%Q9Xb&{x+lEsls1Rl%o1M3zdt}a8y@ROoG+#_P5hg}dGFVXsNX7T}wNDEf;d#NepoC@_u(XB}j8TWd1bUk5W z1ak1|vo`h}7uA3g9eC}CZP3Q2+bLO4(IPWS!7RtP2IDiK42;evP@mCaVk-`PIM0=z}3x-8l z^hU5aU~2>02o)YT94Z{S0E}^o@Uc(H7ReuisfpB$ZIPX0wes~Spj1ojOHCeYdm}J> z6%p^flXDI_?=4n#h2Yk&Th4(*9h1~fAeLOv30LPlm*C{qS# z(A;t7ivRXAhl`JGzx%^6G)0BTyHoafMU8Ci1k<MW zvh;xn7ou%P!mlHO9}_;(Wm*Ei+~0zqTv2Z*-2X4Ky>BbTN#r6-UB;zw`+0A;4JefD zBN(;2@7w9rKLkz0*c(H0eNz#dn?^$u5RHbWk&(nid6TQ<*JVaNoc+Xxsq7&&3ik`D*aSEgQQcFP7$e7d z$l?f93lh8O{4%dojd8U`d8yX$iM8m6FeuT&EKIp`&VsiaHV+>}gt-AR-r3^b)|#4-WEpv3Yieda#5~N8c`1Gz^t9OWiqtG! 
zpHH_o#&C&OXYPg8)T|uFu$Y^eF?AS%P>4Y=_Nm8VSOJrT&OO$xnUI>z(Ult~q%MB< z;T&0O33}l{Y$ZvUddiX5L#rd^cuSln_ zY_4uf-~%i@q^BowZ1?zyskE2;>Sx`YNvX$ma_FBXrOx*5?tc5^)F-^UHcXN-4#5c) zcx~?Dy$$2tD^pSz&)$4@lpVJNMwRb0J)~S6fz4e}`cYT-QD-JKsj1D5XybSiEfjn7 z;pV&DPcx~>-W6_dCiPxUS3WzNx|CCzcV|=CWLs7DD_MHiu>J~{7UM;K*AUZdUGvn` zWt_!%!_?FbItPAQY9*JCOiQ(Lxt7b)E33kA2Os;DCvwR5(l;f8TUEvTip$^{VGNw_yhDmvuMJOx@{K zKKiw*(%CY1N_XoN_w!k)$%SvOy@Cf=ee=mI5#HflO?daqb(ia1wGM7g=I`BorS4MI zd7rxnf6jrgWt=XRui);tJ@(EIOaAdXH3DthyPbXZt}?v)@*caZh)N#*gKJc@JiP7i zvb(bI?w-wd$0@RE{+4U(j*t!AwSC`xb$04d?{POcJ9V4)K=-wCQoh%i$!0;!FlMJg z%HA|iLQHM-p6@?w!$Hb42QR&zt7XJTmp0g>ToIjMHRUPvMxF$z`UU`~D7o zWtS#S51dZ`|H5gs1YtI<*Zw-$q(iQKGx8 zEj7lycywxa@&{Yoj^(LcN1cKFY~s?y+qulpK}A7kved62Y&HlR3*y#;QO!P{Pi^oX zbmy)}t(@~0!dz(u;@a%xlSq4^Y7|Gbd0k`K4atK4Y41}y=F0W0NHzJ&?p~hWSm`dG z-_*46&8dpQ;>==dK=6pkS7a6w;jN8NKw_871TttXN$yPI3h`LW)~BYwR65YPXG z>sp>q zI!TEHRQi1;Hhy(>4cDP@?^~VaptAf1>Qh(R zxwff92R|;i9&i}s75cH0ALnp=nm1SKK`%en>c;>-uF{X5u+^)*WT9+%+4AJ_Op`!b z?lQNG8|yxL(uCi-q35Qo+xYde57%c71r=MPd~mrMw`pZ+^4Pn*uXs}?Pnx*b`?L2a z?~mRq-jBVfq3<88Or2WT<^90RWxww|?tRbe^}g$U$J^@EGG zcdvK9*X4cHyU+Wf_k`Ey{m6UDyTiNP`|yWm95j8})SJEa-gVwh$8bWG^EP@ndN+6* zyf1jyd$)NV-mTu}y`$$p>3z;y=Y7_@);s*L%!MDg;QjA=?|a^T{(0w~bJp3P@!Gv> zysN#dytUqy-fs8Yaj8{ajq^U7YA&RfCVDx9%+j~$FtTW8P6$cNqkkomL#O#Y&ptZz z4Qy#dzwle}qY&hjP{%PO!v+JXs17je=rnF=bB|TXa`iKe}}}w9K8g*rW*u zD?%1xSet$kVMZo#|q8hnCwNdNtxWtjxa0qKmFvv2@`Ey*~`2>ev9V?QOGV$$q|CFO^U z9(Scuh#g^fl-4NdO(r? 
zKVwEqlxV7m8hZ_@J??AABbiG;6eBA=0^1@ahryPR$J%kjY-=7c9dNjwI6Ki9ha@n%t~JoWK$x+L!vDP{5)bpc1#{4 z)S4_SLR~6RDw3}G#MH!j^WmrjZrXvhxjHGOx%&LC5^M5hC$ZkC&%esgUt=>npu{=XO)zKo3>G14?x9OyGv$YVfRsd&Vg@IhOOC z)-A3=P=+p&kw@rc%ZV}_hNo(#Q;T@5zUC!BB#W&5oOWpP`RM0dA~K?%XS3hgV7b6s zz0AAYe$Go=go?5>@s;SVa3RPr`&exmU7G0BL&G&JYE791YVR=qL~vM;IV}417QH!m z_|01~2S;xX=V87l9I1MGcz$~b3Jt*e`VLYRTjR5m=PxNMbVMEbp!I+qCnjXrtP=4I z6GNia5W6bnF(v{gbG4Vg1_U5@xh1x$&n`;+zm!7{tMs zATL6i6iMaRyXsR@2Ng;NGn+xGvDqUB#q5-|>rm4$)i9)HolHMrgxy(ZN2brtj88v< zx~y|<42fo|^X!$9aLm5Z=Y`?#YXrGBDmSVa)ppcny?>xMoh8e_Bfu}? zv870GBzsGZSUAjUF6R}yQ{I+phLXSg_SBpSq>+X<%CZv;|J7Q9FEDg9??_EQVXtM( zgiq9kY*cnf+bNY}FH`z@*c6eVA;`$RZLxhN@He;!89tC@!Me+R`W-CbhictzTvr-; zuE5?EQ=kU8=2;n%D9G-m zB{6)lYMSEsU|Uas@%auj*kb$>1VWYW&K8)U@JMcqSaZ|dcC^q*5t&hvVKb~3H7w{2wW=lmqeMt6VTzbYj|Gy6^g0UTxv!7pc9z3CrA(e{2S!SK zv7I=X{v4(9UFLVhw1yk?dg}+k8&d=xhniIi4@2oJ{)Dn>(mAg^tRd*i(|EH(rCAC& zN!pFk)5%S2=!E-M0RQ}F#+kj#zcl~q<)69)*sE+mtz$Kn;t{FQd!n{A#(s7Ybv5!D zU4dTE*>E4JByKdnW*e@`97NhRKIm)8!(sm50Y-Uc!If+#tBG}~Gg8%+eN89@2e}Dn zVDveN$063qge#Xsh2hgRl z*>f}2fKjAgvz!o$TjIFs@W@l(+vlR+ir46Vb26NpVnhJbfeHIMo5zkZ@jQmpSglXl z4j6i#|6o|wyYX`q_#W%*5{q^=Su9)=N*l;5e;mOzjGVTOdV98e_;?$ z-C^my;1hcV+|ln!O)C%_>cvj>5*}vhTOt*B|6W7ywW1IspTISL|_O&7h)I*#Dq>53A6K|HaAg8^hS5mCI%mlyX>sgw1~&H4n|x_+)0ye$P5y6 zG`Q^2+7@Uq#I8uHL_D=f>ta(HZxVE~od4EWu_VG^$N)v1#%M%N!6-fnd77DpcQK#r zXn;?$sF33KfHI~jeP7p@BBQiVU_K>`Qp_M^qfi=bCghXG5MV*#leV-MUl`_-!%G%@ zROFMp#ufQw&$uF=EJSR?7^U%P4O!ergnhk{T5ropQakSe9Mcvz@Il-VF~@v9WC+7- z2%pqwHdFv`9HeDTqJ;YhAqXoM2YL<;G(v?(_gyPCDG{J^9^X%t2OE{3s53Pc7^b-y=J+K5V1$9k1(9b*v+VR|! 
z1FfA$^oaw_Nd!WSi~jRMA-?FD!RkZBK-~Q)4Qz<3Q>fO+FH`YXo*VR5bzh6oB)6wH z86*N7E{Y~c#M(w^@Sedwqn9F^x z&_O2wAdd2!R=+*nO(|Ldu6aQkI?B8qIGXpSRlIVFx2r6WbOGv#Vl#J}il?6UNZf6S zfn6V-`zwkmcbhRR5yO*C+~K*~9O)>{z!`V55x2G6&#f^(bKGsFm~gjw8wXJDDSGa< zZ{z5Di^4`3_%K%H9b71jJ_&IG%2pT_DTofFYyQL6)Iez#e93G4r zzB4hhS6Bho#4mEx>A?lzkd4noTA;ixYFxiY_z)%Me1rX@N5~ zRNMM3*-sSw^4+s^toVb^57u$x=-tl`4lUm={4u~3A?aD+aJI11gq2*Rge&5UvZeLT ze^Vj9BL~1LJV2IfTLk(|+K@77(ngx$VX*8b^bZdT-CJN`>O#h{M_KZ|m z@(hBCGCXi5F**E0_5rkV8POonU^s9gti!Vjk{y-YIxGz?WhwK9__~LuX~8WGBSRQ-db`{UC>Qi!%fVDl;A4KZN9br)#dA;*Yt2NxixffHdn5zjw>W{A#@R3a57*5u<8k3?F7j( zAv7I0A#_*ZgwWo=givzsgvWAD1hSY4mI|T!gZPSzg2N6jg(OaRa$I<6aMZKz2~Y}* zM@c>9O#3=q#e$4yNGRo#%}$Rdlj$KFWe!Lr7-i!-uO3HUycQ9^{iVU`WA>4B{S(_X zbFf8c6zQCcef41A;hm-cs{_e5b&IMF*U+FCe8Ci zfe3k>BqYmgTyNBp3*Jv)&;VRqYabV;etq zrI6K`{GI2NkM{oD=b$=K1K?=A1pMl(7&Mu2_MVhQN}K|8G~Njn%}IBek2c`n)H2zKN5@?klW_YWjw7LGuG7)Y{R(yP zdA4(?P;M9_dnTL%UFS)NCtjXL1Ve$A7SC1RUp)_Adjk%pa1@|8i$n8v9mz3^Y*F)J z0UpvT8*l-=$yDKO3!GuRrkPu$v|D zzU7yzI@jAxtd_}95BX}fND=6YOD_v5hkB~#n;R!>ihp zz;d>Hgp2Q7+s?aBTur2hcdu^>@bC!@ry;;oYYOo2iJuz+Jk=%fovMC!;hkz@e8-|q z>=83n(Z4Ez+lm)!H)(^D^5OT?|ACvbhyM@1xOdzAOZvsdroC9=o-M;}9D5@FJ!?>6 zDv;5hHI#8eEDl{GMa67l+$mVe)Sysskk@^K&;zd{Yq(&A*Xz~{L5tg3#@9eXyuNP@ zrO2pZbl2d}wb`;od9Jb0)7j;V{k}zo8e3zJ*q*t04F$^RcFP*>X=y88OVg@R2m6N* zRy|&oM6ezqI@F)WfXaP7VAWHPR`u3nQ~=d#fg=jBD#WF{1ci*sC0&H6H&iR;Hi0%> zdZ2Ka8W7R49ZjT6{0YisN9g=vcwI^@8Dkz`MqW$f2_|!mi+fKcc`p|`ZnD#KBRj-O zkLkYfNA1yh?gHekJix2jbhGioTiQHLvKN)Nm$cyo0nX&`y{f8W&e?2PS_TB&e`)ai zs`sCst$oZcZIIB3-&B+&UDWX@`Nx?ZcE<(AK3K0F1+6wYkZ@t4@m&C8CQi1M7d|z| z$@E$I6;3Yo4#USaGLPTtLu7F^)7D>tF^!VHVamsplY)D|nUjLU`9uniS$|NlJMX37 zjrCrVf`2)dPoD-axCJ3%Gzz1nSPG&g@AyJ8fsC6##!WzCJP9oT57a1R_A%sT#eE)D zAzcyVIVs^F?Lo7*I(8O+M1vQ*9BV!4nXd^B?W5g{Z+DMgXK1%C{>j$jdKl@=uL*_@ z6{(Uy3R$OSCvo+>WFqo5jDJ9*CyT@=)Uzdv<`iCMg6YN{8f7Du6((Cx7SqEIcbsb! 
zA-!H34>@O1YLr|>wM}50WXxzRe)eAnZ+V(iV#F$qXpL{-u6(yWV0w@Tun<&7^fog& z7a1!cs6QK{oYzskE@bNRTz`DpWx>f$J;}2yvG4Lp{PCZ;GGZrTks!Q2QT(V+lxR~# zWjZ4>Ahm@GQX0sFEJXj&a^AY|z42!+3!YF_7+hP*23COFGx3b(Q|EY9wsysSH|3p=ExKz@8%Ie%IqbHA36qD#deHf8~>A-kfy zu?|jKd81~GSsj1tZ-TQ*SedU;?+tot@4bCOu+zW1tH)h5Z9Zxe{X!nTi!e3xK|I}GI!~F_FY}&$B0!gI4 z7S=elwy2ZV+Iz)tta;zXhyGpg81IQu)(NJZTd#IoFZ4yZ^f528Lq$}ac3rJ>fs}Jz zpJvXcKiAUh1bCY0Aa8<8bZDId&D&b`b}$aykNaNj%yCXvhfJRPm%i;Q+BG-<6%YNbF&m2u9N2H9en%W`9I093ZwM7vI=W?Am0Q z0`bR{+bNhB-atW;giE&)_~Vo_7>J+!+ThS0#~0y#>8LlEPJ%5Hqm{Gk6cYz`(JkZNQ03LzP>sC$s&O9bn$lK1qi_qGe2~EiYA~eJ^ zlM#g}xk~-|Fx#e;b1~iJu6^Sf(|wg|QtC>GtA@fT?*xAp?Ybr*u6aA94ws5w{kmYF zO0iN}xcPlrvv8tCYbk^zVW@q|LG@oM$gd^XhUe6oVZb0a9hX4>7oy1(i-6++;}7qE z2r2D>N``Y>!)PnI`qo*6)8(Dv!BD2UE0A`hiIUxvfws1FRY?%haCnt>gHrz8nxGK( zq>w0FH>y~>I2ZnOqWdZb{T-e`z5UgGPy+B`QIhXAs+^hOKih*6o~8Og%Ip+0}U~w!&GGmhfNL%r)63yG77^UA@E6Z1_H> zO=#8vzn;ayVzS9>Imi0-L;}D!JFL3KaMH{_XeYRdtk>#mZURaR;W0!aV|Zgc#OaAk z<_1!BXSp`woGb2zYd%iggBt=(k(=&w>~2OCXY6V~HmDs=5Q42%G_=Z1dg&E`;?lYy zupCd-NMD+Yzg7z>?W0+wdRP3rTCi=AHjVRzi z`R$prEwG8qIFnqCkolSZur$n}7A78CGMwuw5UeV7GmYGmj~-xizr6V zx;*%kBUy*MIezk1Uy$>FI-n_r)X~h4it%SI4~}bMGkFs{X#4<*enwqQ{$3N`uq)`@ zlE_V%e_oe?L0EYTTI98or1juJ=>!R*+4w7$2LtVsTDYOB-w>>hOIHR@S=~_4lS-|Z zn)VXk`Xk8 zusPEizm=DcK+&sGHh$&R!KV1zZwQWO`n_)qDjS(TiUPE-kY}ACrxieryW?P8eC5?a zNA{-s;@7<~IO#7xejl7Ty_Ps58mx8VU*L5yZ&wTg#%PvQaoZx2IwN?MVD$x-pj_gQ z)KMCZWg7^Sm{eSPQ?UMcwn_$1ADIECwrHJ=K1Px^6Gm!jM~JmV$7jDO*svZyMC-JE zcE90D6>xRNGvBk$XZi{bg2C?FosT1opj##r-ygt5jIOu1QW`~2*oP*8~SqmZ&w7Hp8+<& z9Yr!*SyN!&)-q;lCYR=-W-h@jD?W-(kZF#{-hl7J+nvjBycIPjx)&1&1n-T{cylmV zg=mtPNrsbrT2sHOvMyLN)|S|-mCueQ7$D_37H|ZrPY5clEx(GPQnIDB1ak*CJR=Z% z+Q}`fO=Yj2O8m7q2fe2^Qbs$wBLQFCXVUJ*=^Q5RS9gS&4sdnFLsysj;>W%vSohRo zT@HiTv?mYl@Hl)(J-Y?v0#Gu-7Dlk=zzB;z%c5O&MLwF-SMkkn2_Cnc8@3;i8xHIa zPJA>>=HP(gA|^U<;K>CV0#F6SE^ZsIM-8MJ}Vb zz8WNO{M+5ZhA7bP9nEn&EnX=nYehpE%r^E|&UWOu`WivPWt1YR_AP23KkKc*<}%4| zbi3VkYY@>QVHjFxe8XFV)thf@PsymIpxS~fF&?)~I`;47`1`#r2~&T;A;l2&5@08D 
zxH2Ia!Wt?HW#|t-9bb86(BDUyI=+&$GLffs3*Qmnab@tt-)C*My)8KT(X7pilIq~I z`uAN(s(aE|J-v#0$d)%ww;*IKfaqdKMd@p@=Znj*L)ulkj9K|52;ffPeJ0BH3*TTK z{+k5n_3%c1f1A!RPp3e{O=<{1#$kdc;85HX!c)Brq&?5 zCO+})!C_lI)1s{%-G#pgrHp?pc?>k1`q#L&AmeRAL0o-%P_8l#R0JQ9f;8n}(IOZN zn`qmqD#rA^EA2Z4Oq$#bjl1gltBHVn*t%tRi|@2zrJk1H>kNe2>N8t00Ui6w6|CZ> zHf4DqwS+z5{ER`7&CvCd^7zq16Rkwfim)rCk;}4E?IEor}IM$x;mW8pqh5wpnz; z*79UTStI2f?2UiChw2494^<=2$I?~7;9x3^N@!ao=Mez_dE9jUCszgQH4(iqCuU@8J!I2K2UTw_I+!vo1 z2PbX+c9UiWEtqEIb~e89m4T-u$AN%9EdFsEoYseZfCo`lCf6vmyLlGk(+SC8om@By zq09}$wNV6oat@}!JwhU-+WDQLJ=>s6}(|4Q}GY3 z367&g#D;6BmoOZkdTsFMtK?IY3k8$?{TQDlZpH7qHaMk+XYx@VGnBt|)$2rS1hdRI{H z9IHIvGj+B-xVCghQHye(%=c^r^kWj{PfIO&v&buk3RKn%L4p> z8Q(bJleo`6To-J}d?vp3y5Mle;UMzh+NL8dIO6I@TH=R@2Uq8D%*aufCdj$xD7mFQ zdhY`~XCv8$rc>f#+3n9`>a;r0u&%N^=`Obk0aT5|Tel9zNbQV*7QvzcT52BPgjPQM zV5cs8knhONAs2lrV|EnqsQ>}#VlPVpxfIV*t=%&l_bS`)Bvq?YiTt)iZ!h90z_b$`phmmrL9RcCySI)8M?G;!aPTpK??i&IKMZnZ`AMBPmQ^kWZpW z9tMXPH}4>uGV#6H6i~OI2=WWEIN-EEY-up*S%dL`c3ZcO^t6ulwULo_dk2Tm)x($x z8(m|GjadG>!DndGeoH$=T4yE3<->P^dFET)vCcG)32GoQ818N7Hlmw6v)fvB4Z=Fy z0OpLd=xBR*8{RB&J=l=N zVe=C`!MMkXwpdGMNLVLt>nxpN>5uIzQ^0N1!e;4pruBR-i87Q~%AE5HI)An>j4!Y# zZ?^YhgJ-458M_wJxosEOHq0q6_m76d@sn>LOZOp-Dm4fvvo_jtzqF{wKbo|)rJiWH zh|yFMODCza?IRvIyUY;{FO=Js1Umgq{b+I^+E?C_hF0Q4#-O5D&T}oUK_~E7LDcIM zl$}cwU5{}#dlTihoj(!>Ag7}4@nkl^iK__`49s_rgAul>0`=x=golOa}a^3;)NyU93C9XOn(1{|+r{d$st_tEcAJbP18b(@Eo>(qg&qT4TY zr?YHlIFM-~aKELFY1M5yk|!1p-Q*;3I5Z9oSz%5?Dp(d%w7hrVPops4T#o7Z?H3hR zcizNt%R$uW@%YZ0g7tCjy+LQy`K3)u)O|SFmW=j`6@JD4FS;NO@W-PI?iF_a@#Tc^ zKcx#Qbb(2QE)GH$)dLg8ADJ$$_%6$PaaZr&*ad@jvXQc%2ebzHXc?@w0#mfvUP9Y=V-U-(+a1@3-BL&NL}QkNsc=xGbSE3^thN*65_hR=;J)i(eCYdv z^;Ja)J!(+XZ#U&)bQO4G&f8k9U1bR;NQV3U-a_rMxwe}%nM8q=#_XlqCuK zR=v1Q;=TvmT)z)jC5^}g8n49SoHz?@Z%I3A;;PkV9`#tYQcO3+#EXFN%I9q#fSZPJF^0X4cnAijWt>aG5F}D<*BE zgnaYvq(m+Ia_U4oX=GU&Jic`cif8o_+%x7>z|&@j|K1F*L`Q17F`a>hKFWfM_XR_@Qih= zg*>z6j-i}6+0R#83#-MAwc?Dsd6c-;T@eWEe-|&s8#0I0QJA9*Fe3Ob^#L%-{emFOZH+m 
z_e#X-$Tv<1*r!FDGx_~w96@o4!oSd_iBnOKAPp^;*Ntarw)0UI63HQ zLNLv%SrwH%isihsX*n~?mlHB$D$T3;MRnr-51$9g1e#ZqKBSH{EvKDjbt%a$w3h3o zzRxYNi-muW_#R8FTGlR8T}e0z&-xf!VOz`MfSFU8?*coR+%%vQ*KnCPM2;-;rkc55 z3`6un8d&{V^0p8%F?yRFARw3tKW_YGZHXs)*njAh?< zl{eUM-)jLgh4Nr|14mwwUCy;G_e_R?pahhqPZ=7xs#~Gt09qqY{k(MqLRnF&mYQd9 zhFk9G-{(+VE~2NMj-!Nhl>%QOB&*#f9Ems*w`5%#^A|ySviZngWU;+1d+WH9Q`ovDg|1i9>$iqM7yvfIT}P_QI?4ffIMrGINeti!^De&$g%QwS_1%rN^avgFWqc7Y4zSJxwY(pY*kd%$IE(mjKWw|#N9ehODJ(Mo@ZLh-X&~blAb)&1+M@v=92G}+ns}B1C`$RFYlmE zqV^@uU~PLG*UB&8#T}CAYz*4TWNd^gzvJAJ&;6h7t$s98n@TBxEd>_G#k>O8aHcQ;eLiA^sJc zPFYhn0yk_c92YRPbOx0GoX30q96dR3>@1!30U~CwoA6E>@-D?D5_vQw_XI{5XIjF~ zBmp{c83|s=Fs!>gJ60+z>3y<;#5OZj>?CZ_QYbu~b}rF4DP&=}#cX>y1z9_%Ge{wJ zZX3OmnVHmiumor05=wAZQW-djFZwtYWjI{R@^eZ;v_}qrwkgOQr@sod81tCJaCI;2C!fyXSj;BlSI4Ps4CCW)6~>B zX{GfJrk;B<=SDz+*YotTUXIF`Gl7R=Q96^T$m2hYO~zoH1GHL8`nk6(Sn5Lx{Z#@* z;lCv?lme$Dpvg6~V^A91WeZC|g1hYe1wfqO;vwM|=2rhaz{~rrQo~bC}GU z?u#$_RB#L_3k{wW@=Mwx*iusz)V7~(3{{ZBWR+66S`S?61$6#BiX5*~j7TeLM{Y1{ zb-=CL8IP_aHY$W-r*lwMlh$84C3 zIc%UdJ0?J)sd85MgM)=-v|C=DA+9-3PCUGh-3trTH7{^b`3A&TaNn4sAevBCi+z|w zpzDGcsH=YInLH}%P9tU=#D`7=8~Q{vcPEEw!K^($e)3eX zX^W%IgduhM6Bl;SL5v5glvlXa9-n_6s_0T_1AroP5hlxO;kJ@bAW(AeVNU84#j{;F z+zA<3MD6Z?uJe(=Cjo)$NE1QxkOZKNHt{sKQJ4e@log$nk->6?UvPAGg*wcTL9>Dm#ZnQ#WOYAigRwdl9hq$^x+^$&oK#cJ5KI5=Ee|G| zI$RWmrpI)2|19a^xiUlLhDWDwK%Oyn?$u@Od5No z-EutIv3w}GRSwTvS9QnIsgnd>wXJi##UYb`uUuqOOMk;MSuP0rr}5uZVDLNYYd5a( zyTuD64%JCIEq%s%!<*n9RM_w zA@E@84f_n(+^~QZeeQdB#vc>@4jar3#*F#d2}fu9o7SUEC6=3w_GI8kOkiqIX?{HH zuGR5d@50u@08fqX8cj_dNkOV9e~jI_t>yC77}RD+vldbb(5<;o2?b@j`Ul~0Qm9YiQ2yx^6gxDT!8wEv76eRI>e8o zy%2b(j9IHGQt6lckdb9R$0_7ID3|Y%KBX`g19Y4(=N%diN4v`EjNH^%BEY$RY*7>s z_=0hg{`h&f2Yru&DXd_&^X;I+o=&#vibcmIS{;`t*SSQ#JQ%P2TyV(dE!r%3&kK|+ zEpxq+nCm*wC0-Ce_jAGECeKQ>o#~bvDre73*in7o$M0g;CniRD)BY|q@w!oN`&*2m zsQ;f}M)@`>IyTS07PtO838($yL`YTVZB58f8?K2Cr`K zPAPGmZ`4o6wTI+CuNxBGwvz2!YlfRF-9@kv$QTit>Zhf=gzVAHz0+4i$imT9n}qSx-QB}FgQ zq;&!c9~Bv=Ir;c>+vWlNAKY9tJ&5ep7EdcB*Zm(d1O#%O#hL+NY9I!HXR(Vbv)IJ{ 
zfeP;iNjWS$QfS z;}SyTXIqI}WDg3tLpIB%JXQp%R~~ATSU}t{9;1NVAnnK*uBBYzUTmo zPmbA{{v*NtCtf*)IgqCY7vJigN6Fz%?ILYfw~&%m!hH(eUO91FSP{__34158f8Bl< zOWZ=lw)qzyb^TyLem-pq?;WaiL<&;6oGtf*bYCCb%-#BA(XNvLjkc0x6kOJ9B}LDB z8p3>%?M{2bh+7ust7hGqImYy-%L7{f0l8bkJUwef#+fo2n6Uuch0r|_G#}<;QzT(N z+D%u*?xA&y^I=bims_SlZE#tf&!}7x9%Yf6M0QG08s@`vDvDuB3a4SdVS%My2Y^p; zBPo7SbDXceY%xhW3z9e=wBhCl)CthdVLrm~v{0Y-I1-03KmZ^BJN(5%DHW7hb zK2iS;vCK2-T*R&ZmpY|oB*)XOI;C8R$?^2mh=;+uJkb9ruB&=Kd0x(T2Gud6=5wrO zG8ow{VJA2c|L2rcCF8?AZ#g9i8-HweQgvwqjg+J~v!f)5V%eX|7>Y!Jv4(>(6gc@K zGL&i)$1~Y4meL$+wwx2k9ujIYDY1l_nq$p!HZ-#o`ZTi?h-J%Cht5H%XcA$C=v$RsE$i#E_->5#ld73jz3ARDtB$}WPjg%%&JbjLq=MOWX+sF&Sl zDs7gNJGZT+4K<0E9PW>xd?uin7Qm6H+gcICR>+O=3~~6MB-IDum(nlg$k558djuop z>Re?F^a#fl(3H*~Q23gtR?}r5GioJUM%)}xms|!SxZ(Zy7WxTBDOC35_WENjJHw7K z62{Ewl!juA3$mW2@8}i7o12&(lyd_uM6O?piBAy0R2!Y4k=LYvVHu_g9nl7=i_3HZ z?)}0o`of~&SkXL0r#f>k#y-sp^l@ZUpyy)+dymE~=rty{{tKVjB%^5>DxHH1_z1Hg z$cLC*_$+V=8Ym*jH<=V z2_i|W+DEiHb8(^4c`3^YyHV}S0_HOz7pr~Qeonba?Msfflne0<@9OA_fA}@B;=AQ3 zGZfyglmXCV0a83rL%p)4G^o5_sI+AbZJuq$=!HBS7=+WDC(wNCQBf5H%e`Wr9Z~TT zlHNEAf}@w09FWN%7?R0GSxMF{e)YYvLH_nU$i~}sLYqs6Tf&iIKJA2IHN8pRi))U>|ix; zD$1?d<2)I7x)nUGl(qXRTcY%Kx-Iy$>~^{=fyDJPGJ5rrX9SE31SBJ1&$PIVgi2Sv zXIF&pQ<-`I-$qEj+#MhCb?Qm=#HW5eIK1lgDY_=N*zMD#CA$|v2%$R5jcd5XZjOW$ ztageBg9|x};2=A>A}?3okY+%XLmsIG-~c1RF*_=zAQ_((;8blR{wn=uGqT*Q4|%Wq z{9!SI$pXQ18twbI@4jIBUroqnVK-hURh2sdNA;O8NAwv&HJe5;R;?qdhu?z~yK*zq z9vqg5!I1MhW!&xeApUUnAj?X(_UCsw<(K3&4uPPSyPKJ|Jt&aa(Tjs<|mTlnYt zVrFy8r)P+wM}QpDgg0Vx^!kUPT=n6-;K2!s!KM4yWC3NLre0pheR9I5irM0btdwx<~qQvV1nhXvbl6q z6eyZW!ZGk4=m(3t)g2a-5hbr5^}s}O@~x>nC>w-c_y&V>*&{rUCaT7a1gbh|lTZ9_ z{H&6=Gg?4b&dN214YS|F^6{#>ZV-nvw!?{RcZj4c-5$RP(5)7GF{|AOXe_?+ev;7( zKtx|&i2p^$De=m$M{Bg#f-34tO%O>oqlCRm2(1)w#EHe8P$_vl@ZqtQj&)W@k$gxR z>qxyS)hC$ZN4BXIUpvCWeiOaN88bHlqvR7eEBTOI4N0Qz9n_Gl!0<|dBRu6zrvNP?#=|@e|0Fhsr~OwS(`a>Tb5VI*+PmrSICJ<9+ov@7j^sK<2#z z5M@iW=f^Q;5>>rA{AkqEg{sX#Yj;&C6+~6hbvczmW#vcZe1&hPA$wdN&b86lXlp|U zkdP#wPFTmf+oIE#+(vzd4<|9nsnyZp 
zLe(QR`fUpm9(HNfh}IPv5=KMsic9uTF@E895Zvkgj5+7RTxlaf9~O`|+V=AY{-ieX zU9M<>5VjFXt}U>dRa`==$#d-l|JRs`?bkT?Zm^aW)nWReend7t_q)N?suU^-WU|Wa zKz73cCQH_{m^QylIgeH?NnMcf?O-%I)w;`RtqtJ955URoWd1fnxsJ6WZfl!+yX9Fl zh12o9c_mDbk)A_G@o~iyQQ5v`g2e)tjBSBSp-75|>!>(tNA!uG5PCU2yw_SU48(u> zV6ZOxvw?W$gTa=jvjp=*yBrIM+Lc;|5wmm zmB&_yP}Eh-Q-0eV^k`K4$f+HI%fR~i7b%%`YCW~Xq&F2*WkoZjIXgU)Z?wXTRvW&- zX_M9zJpFrCzCmRd9%GrK+FsHigj2U<+U@yh_sv&_(Hnoox@`$(ZQUrzk<@UQ`d+XB zI*-5oJx=|lo5;rjl1(HA>v)c~kYlk)a4re1fJ_1tW|tD|r8bB_kk$)0;nxqki@xf2 zQGCUJ2bCu}AS4Eiskvyk=knPz9T)_>jrA>YUb9L>W)v!cfZ!hnmUIbu}`y9q-6cbk#_P#rT`^!G=p7b?VUl<8KIg ztSecG>^5!2=s2{jOhS7-7v7h!b!4hPfyPR?+}s0-w$FV(INB4^qg+GBvuwrE zA_Wr``l}zY0h;f%Mu%P+QEE-mYGNDk?GNQR^MhiS_<@sFt0bDF3p;Yq9*^JfgJ8>9 zjkEv(%S|Ca2F{B|U`h4W|H%)61FpXJs8BhyF?Z~Mt8b1LxB5bO^XiWs(lroYcx+Gm zBa=Y#*zqeNfuw3_0|^kMTFPP$S{@)iW+do{gMC4ma`feaBFASOsrUd9=^BraRCinx zVw#ic{I+lfN&LpBnsi`iG$z&A&VOXin`ZKT=IvL6m_F&wFwxG=(43?)A$gQk$Dc9| z6-6t<_rkc8Q+JL_zT$i$^-D;q^Sb~A(AYOB7VNL=KGYNHAeKvZMq4k26FFb|Se|fi zJ&!JB`0>b(C_Qx`CGpN51>4t)nP;Qx`F_K+Y9{1#UBTQ`Ez7|oKxHzf8|HP zp~@bj{m6>#n^c!CC#uWZG9&C{gu88XtL)G|YB41o+w|#Fbv3Q$OKI2Q7t%gIv_3_r z*t<^g*M1z7dy3)x&11zKKM4+5<1aSCU1+ISvsM5Nt~DB;{*&OBXUj!LQBE@xm|)lO znQUvWt-T}P8Bi~s6o~UznurAf7M7>71cohkSCtvy-uka8+36knHQVS4a(fm~uybFBs z8p7tZj;e(6#jpB@6qHs)eDRuod?UrbmToJZ!AW3^l8f%Y>gsCfD>W15wFkQPHp~ES z!&j>xoUl?d2K$xPX^X2JW~l`qn%VDpP*&;C|E2uv*C<-45Z-8wv|} z$S8qEO)iE!U(V&cgpZ_u_$|B|md%mJZG&>Ml}eFY_ZLLH(m+i;z%lh^o?($od2Si` zE)GUB^4tJNCa!2@{r(WdR0>sYCsIhly%Z>WPJmdhAeZNPqJXww~ImB+} zl@jSu`(+ESQH~F9cnb)&w{I`%h*_>&n>j^e^bNCj64TZyCYm4&!aE)fB zR5X)Ys;EOI1hbNM=(A2maZdgDIk$g6B23ZUtPwXo?R+|~Z`?N7LbrVxlKfU^nb(=LWSH%=HBD__F zD>kGIp=$^L0gWH~^WezCS^r}|{V5JlWk#~M=fT!NPo_G- z#L;gf)3bSBiof#nV9S=0sIbGR5TWq5m2MHC5-DqF9Yh+C7(sb`_||yn7s1BUs4Ka% zQV~}H7<^my?h-)Dmn&4VBsK4Y8G(_($|`rQtMtf)nu)kPYa*`wA~^Q3jmAAm6Kw>R(Y2`!lkA4x94dT_6^6A$LXb@Z0o%8QQ`88_QbP!2~XP0liSOI$j}a#T}O3kckh-t 
z#t4A$#nfc{mx~li8;$?ZVo*L7j8ZgOGd`X3MF0RY7xYS(+x0p=d39)q(e~)mia38!edC@Szys}@LLgKi?qMRO<%-jl|1AF9?}9UVIpbl@T8_ql^>DD|uqM1X(IlKVczR0l z#KXa}VLUxi?L9F-M8jO!`C4E`$d96LC_Ef-=^zKAHeTG)am>o_Jw~i1STtP2BDwuhLSx~ow-g5YuY4OeM1PX2vl@&ErmH3k z8?!gP?W%7T)@Mpr{id*HOG*5xSK*8-l}xlaI$JC$mW!sXY_k?cGAmQq*lop>LG5&| z5=LLTKYm4qt^2e1{h7isxiH$dFaCC>aO$A=^$Z*{8gq4st;Bf{ekwjTTR6Puwv5x6 z?9R<-_vLZ?(rjU<-ia#(%33PbMOUZyWDA=zSI5&?U>=SCD_b~pl^!ssnlST}81A6e z8{_`g!cb;cd`fHK#LVUK%UcUa()gOz!g=dtiG9Jzn~q9>7#IQi+X+TVYCqgscp~sW zk^0MnKZCZSQBhDdoQ5w%#qY@#PS5;f{6Ma7NM_g7ZG{(X07I`qCpfj`L0F*O0D;)y z-5i%%{w%Jw6^`KHH@6jz=H=^ch0UcV(uv89``gS%;y1Jx&OIl!wI9vmn;N^q;>SH5 zh02Bp9-zinZ?fb|rP!m? z7%nl)6+?R$|E{C(ccA=l^Mx~+!5#U+ro-!4oNWV(EYP-=d)mNeDQewWfUd#GWkcO@ zTW8@cF#7n;1f!EI5pt_ts*?gj!suA1Vf5Rb9;55ywLxLaOS+{4N`OB=b^&*X0T2l~ z%9L34J*&6k_vnHvevclQ4qS?F4hrY>mfX2*?V?EtJlkzmrSP3K&0DO-QP~#?b4|a{S3cA*lAk zGQ0RP)P6LRsn;8Wd`%rPQaZeWB-a4Cf|ck(v24f**ZjPJgid$IM^G)X0pwq=8%Qz| ziMraUM41Qum@ybgRl=-GEF`CTItyv3fg~d`iTXMNNv306HPy%il|Z^_cliRWBBTU*QNFq*JAi&mtfWuTR@8eIp2`&s^M;6DO1|LO zkuNkVv))Xk!{ckZ3WrrKTZcum_>s#{X&}SwWzqgOaBS4(t&%hJB6$tJx90wY~TL$x2?fmfGrXT@z0o+O_%+DH|?@&KQ7d8l;Zz$=*2ATUwL3B(AI~997*b zs8m;5!@4hpsJe3gZjqR-piCso@VuXRw4BqLvyPxnhB!2Q%SwMG4~sy-ODZ z?LXq{naZP7$^B2W8_B4=)fK@emMW(nJa090FZ#oS7yi%^C3aEBUj5aD1B%dRO@L9! 
zWkea10JslNymV6w=n|JuilxOD#zaR?=v zy8V_UidFAV#$jqIn4~G*qF6R-qEmF1fy$;f`uJdLC3i%2qb)AWF_N(>>nqsNTg`ev zFuO3cLS(EuI{Vcu&-Tn*Zyl|8V?paZ%I{L$5305yHj;3;rF|*dqckvy4~+zoa_9Gd zvBNxNK*Ek_;UHX!@KP4Dx#E|IN<_FK!G3xV0-cemc6)LXiw(c$SitH7v4D@Pus83p@0#PR;&sLi!v9X>+Bu~U+jog# zWr|+a@sy^{?8LOY-P>Eg@lWiWVc}a@&iBGdSeLsj6rRy$XmwlyE0vh%DE-s+sVYN>6AT5>X=kuOE;G4PTXs3k3Tey?^u3r1 zuWuM6+00SC)1=;*=ElO%w`5PT{4qOy5#=t0&bSAN434Un#KCOA*rudf=844+{zsZv zF3yKJV7D&gC^X571eP>1KtY{PD%U|dcWQ{q^Oks#&nrh?=)eFmZZS8&C`odNiIx_!gGUi2o zRr$)bBuVxP+y>#Evj2OGOUxS<<%& zZiFQNb(-_KR`}is6%b#+^Jd)DCa%<+pi?po-|rnr5A9KO&J+=SO<{e;7^hgC4v^~~ zUB;!whgU)8E>#+UNGNJNgJ&|KH$L{8g@L2kLiIqjXw?x8X7O_Q`{gx-!Sifu*$%NB zde+F5F7e82Vh-2q7rRBZk;Ot&(L2*`qBlNoZJ~D?r=WQ-kSm5h#fWX;9Too(+vDwsdQ+1 zG{H@_UO80pEc(DeBu#YYT0(eMDc8D2w#*S9#4&~hZMy2$1We zCjb*JL8aVEPJn3VTVj=Ay^cx~Xf%@6lb$CuT|;800XTUV1st!lT}CYeZ`VHj4&|`w zt43;*)?r;zx7dL!aDnz;)UmlJ_)P!M?WO|}s{DxU7n}9Mqo~Oizw@HPs_N~&Eci!w z_3^EW2-ecVSj$FAhR#tEVN9k+K=_c&*XsAYDIS$nF_xdF`Jfpr&GRKhkom>(;SVt3 z5JdYRI+$U*-PR&bC&kpUMpbT6J2kt9>sdJhCp=P4W&k@U>k=5<7W|>^H8h)H+Dyn? 
zxX-$g^xdvhYVF`@7>stXU{M}+M^yxSL4`yX?$sne051G%7=cya(?&>b{9&&5m5mQ> z6t{G#x{Os5bAy*kI_On5@kYH!2VOCJvO7*~d8gh2^7mmJSmf_ei`QpQ%P4wTwbMoH zCVi<4YDqdwMHEQQb9;siYVjY!hQP_qNGD}Cis7HyqGB{{CsNi=p+(TGcq5m$tE5Rj zx`OEEpYd%6`Q!Y;F$?v7HpUYNm{Q{@Q|_#WKT2;`6Cp}N2-vYTaYM9AS@;_Ce!Zd{ z&hV=El?fh@DlI{CTNqS`05VrQsr+?QP_v+vS;|Fq!@-y$)-pV*YBy%wid77+<|BH% z6h=WK)mI#fmUd1ZJ#VR1r!%TIRsNOhw~JD-WFOv{m9S-ZvL|r*n&7kp(Au3#_~{dD zYe(PvB*}A0StgQgZI#EBbnBJEN8V7e>S%Kdo#eKz;3|{_6h!Uvpl>s|nd(_)WICNt zK3PZX#8jcb9~;%~TiKsxT{z7lf7%Ca$c4u4+h7l_|3fSHH}OsRBlZ74d_%dgvC5^t z5~7{uP4#G53m-V2)DV8FA0ub=6Mb()dOVCn>PS?Bq{sCI#)FZE5(Qv@q8Q#nxexlc zTTjaFTV38R?fR6c%89b9`L?!~Px0^0)=$UrC;41H&cCyvINP?TulyK6I7NI9=(Qam zLwL^z`$Ri3Wn@RRoR+y+uMqJVud-xE?R#W_z}`XU8})@V>P=ySj4aBFJK~G%L$?!W zDD4MTn#0HWCDveoxcad3{c&eSUeJE2sxSx+?pa9{iou<0%C7V35OtDqktA^AY@-Mc zL}FTp<0{lylES0f2_!LiT@;)fqp34S>5RIZ({T%TENpL8NesH<2*_a8oG;h}N4_pw zypX|+{}Fq2P9|8ScGfhTP8A?l*oD_H+xVoz3OlO63t5QEf#>MncN1;NhA&D}uUl-(N}>Z+lCO;g;4YFE zr|?n^1<_qgH)f`r7ZthiCtgIThm#O9Do4_$B2V|MRw;Hdguatag_b}zy5vVwrTJ_m zYl@acglRjx&bixt(A1)d-@b_~gA`_CP)f#vgTS7MOF4Ipz89(|di|Qg&7*&rD@|09l*Wmwtar8C>n1uV#8mpXXD+sxKHR#43G|mcPs@=1La{l1EhFHv zO%tX$nGaqbrZXqFC#s2D&t)N;oP)Hcv*sA^F0y0pl;6O|@c?vYG% zw`~e7wtWsrvy#GkPW*)<3f=8}=+cE^+_RAHkAHqdVc-QMtcWgAr|dVZbml%=rwH*t_D+qY8%y4ht74)_v+xg%b|8!TMF@ z5n=TjM854QfWg8TgrW3>u#9##jMiLxW@vrunb@73Fqii7`b#;lELKt4p5XwqIDj&b zjA4Z0X4jDz1B3+axF(6cE*3(eMfH(iB_na(L+Xxpp>(%$-;UCS#5ePoVV=NFGnYOA zT3j+zGLmil`dE!XhK?q5n}`Ltc2>@(ZetU)Vr-XsSUD#w$Vkx;OfS$me10tJd={s^ zFROHN8eB(6t(DclAL_k|N(Q*S53-GW)(yeqRzPe23TPd)*}$v?q|l*jNrY0woy+DD zYg&Q{kV*+vF)QLJu&Ls6*v^!V2h%A^ca)y97L0}-qYSqqO5HdfrDA&;uMwpJ&|IJ? 
zN--$}ZT#iyHmvRK6%<<9q;K)UxO8-3uq!OvPYqZ*?x0u`F*+&?>QorN>FB~C)G_K5 zcEr%y;TSlmZ{p0aF|`CSVrNiWE5wQWy@eMvi5xU$B5)`lSyj0{zA&}ei0)hvo2ZB7 zTqAaxN9$+-1pzr!M{O?*RS6kocDfxPfY2EoK%UEzr=;i8+_6t9k^pGUuwYyHCv$Bw z5`}Ak$VSyU-Sb3oeh~OF@&2+yVaXLFj_=-HC~TA`6Cewv=h+02Snecv|2k>j&(#QF z{Oj$7BTs$~TDU2;!&L&*MhC%*iUQ!FDN^oLoFHSP4o`*OKlUWHP*RUdY$Uvu6Kh7BiO|tC`sj z(ow|HHlgb%%7IQ-A!~wf#q$yBTDcDHePF9lVP(y+g$lXjRufq28-gZr9;qgkWveQL zi%->xBc@;ATwN`tH2tFI)RwZ;`1A`-I&PIbNWXv+oT6iO=@+X{Z8;mZd6a2>Bt|EO zQOI=uLRJ{?m|HjySzxZL-WmV&T|KM2q^(Q$uW(}}%23B~IP|AhQxrD*S!bHYnJNB- zSy8f;oP{k}0Yk#B`+vaw5w0w(nK&->6C64>_xpeU)p`L+E2j&C(o+TVBQMueo0seV z=d1Pf|Ap0h`Txu1^WAr^-2K)+b^V9c|0e4{($v4- z_usQ}|65%55q1BUY$3kwue$oDulRH_Z$UUknd^Y;vMOHn!=C zFtq4X>ey({{)HB-8KW%Xqn*b1BGWkX51z)`@5G7?4myqMiqp7W9qU&7Ii+VTpN42j zBEy)wSW;tDN^Z#X#%?e@#RAmbX~Hua-IzX$bdXO- zhBP`(7-oS%J?EcpyV}o0RBC5Q0}(m0Q{f_ua#r6K<7wY1T#Y>g7PAK6oe@-&(mG8 zGhqY6s4yf=^t7tu&#xxX}RYi__fVl9-5A#9WJ z_>l6Q4_&+IJ`|V8U20T%GR-~?5~jt!fMv?~*tVAM4v@mE^b!%8^Z6+Eh7=QHC~^ra zq*}qb>iGPrEuT|YdVJP~s*;+_8_O>eVj&8D$eR4pTlN&NfG@lfwaBgLe2t$@S z3HmfO@MOs?$ZV5@^PKFZl>*Lg%|$-6eQ+T9%;#!hH7bN0H$jqG5c+h@!h+Dy`?#lj zOT}7*6VY=*vxE;Z)LK(_a+9y3MDy7TTX-^$%dX;_#Qo>8$x46-^L1bcl$B%-d)U>R+kLblAF-cA< zQWMz_%{<5l(Ta%`CJe1UAir4Zp$(iuMj_QErBjeu$z?qrpL(fy9C=P|OldLVLdlpm zSxNHdA*Tjb7}B^hq5Tf;>z+~W>GASYYbo~*I41)#A#jz0%bn-i@F;387vMrM3gthV z2YglrCJYONgDqz{aRX?(+Y2&ItKKz?*?0*a3v zrIxCjcdlO8?RF*8 zNW76i%%d@7C?O=6NU4J%n2GDQlU;43mQpYPk|wGE}Z#8D1K;*kE!yH>=IuJQJYm;I&VYp zWSLq1C=9ly%MpUFINL={g;Tp`hUs!GtZW*yW^56B0-|TokV`H^%S#+n*)vTw7=wWF zPTe)EjhgnUZ=b;2tWpkv+AKG+WKHFrQ+qeXT_(qHkmQs}tu5mLqm2X1gzEHQ{K_-? 
z2k^krNMuDt4JR`k1!Jq=rtn}HzcTAAYY$=5>=GvJ+0u&g(PAs4%q3>{>RQ4#fIU-s z62vQ4bh_Wpl{FtBJIZ@HUEJf{23oUY_J%|U7?0nE3Q$GHC-f)6f&Ln#eJb5m^SWNB zk`mYLrK4!=2;WZvaBVl#WdgOUT|39ZHEn*>yx1RkyI8WiJ!2f5(YA5I#IXdmi6)WE zje@zy@jTJz1VUO?q-S>;mv!NB*s@6hBb&uG@y zg@SqGB1;~xN8)x|s;|Sn6kko!{X5da5k4pWXl?gDKutI6!nU#0bXk>B)9n^Bm|WD2 z`YpBqZw}wcrq++jO_!~vQ+z_wTv%PEe{TaElOCq&Ip!)oEY-h_l~Uf=+lq{cYGe#a z450xk^b3QE*A%LI--pkxJ5jBVET5Br_@<2W03ONHZBh-;eqWu^YgK3gafIFv6ryAu z89(KG4%J#x>PF9&2Yf2CEcGSq*{!%^&!pE4ui}Bq#1?zTF8AztbX7BZep9AMwiZjz z-djd_T6$ut{J_56N5LH1*G}6L6R5i%k4j458W}5bI!m<;IL|+ul4g0+AQu+A${hRJ z>ePu4Cv|MbtRGdE7Hbm7v@np_CLDVdF*{k1jlPLauc5~J{Z);K1phlUnT?Kc;%*}S zSu${|170$KT*5a7i3PG%bb-iGHev*pWhF+(d3UD{4?I?T471C7vKE$T0O0Qk;cv8q zs!GUw-PYL8|LdtYHFrXM9%pZW%p=Y&cI~tn>@?cK@Fd9zol*4Gs%;f=Ud%}Jf~k5Y`{7D zJ8#48@x@ytQ1};rxln40Yah&D?B+J6!J$ z*SpPnw^{Et>D?y1J5uj9>fI4~cNp(jU=QkllZ}@vR`lyp`gKT%B2_0`#Hd_UqZ;y} z+7YQpQD^uAUFJ5%HyzP%YvKy%u#G>W8WSCe_*OEfbs}P%W-1I%TA9^1E|Q5XNu6 zux~Z~Tl`}=+t7iYH%bA!mLeHL^|c(+?2_o2BlPj`eet(&fi@4{p0-A0+3=$_d`*8z@@sS8sh|!do`5&n z=D{0}doC#S9yQj62F;r5cXR+FA*DV|z}oyq63A^k!-wNPzkrmUPXHC5o}Qf)cvFnd zw4+HHygtw(-n9%aK{M3`e^iGVI%r9!>41hxmm^wd+O(vHHZJL`+Rhv3vwDQ>PGN{x zn|@m4%aw?Hf78=7WK<(3!jFYv@3As$U(RW0fmdtySQ&At5`NJQBmf+4XWxrGjJDHH zc#2>4Ok6-?e8TT|Cw`^qy04B}Zg)DXv3AYskj7b!Ym5ndG}UeVtfCqFVzRxNP^V}s z{q#`9l6q>o59cyQ2i1vSG#eYjd7U=De`HzY;lE z)FmJ-Hh@$ExatwDoWjA8zz(H@Uyr%Z5ZKHoRO)g!JU$IJ#2p=!X(B4^@KkDFE+(*z zb|h4~y#JIn6>L@Ku5~+S0)P^HP1?J$jv?yTfd)Ew*dF*2h%0#5_AVcL#*MANSgIbj zS^IK=zlOJ&1h%>A@&Ojz080=z-vFCf9j7KchTbjMYNVTa*nV{{cK&G zleS~)pnmp8ps^Hs`rEtwQ8VsQ`U@TfE1=HWmlOPTcuYW@Ti#z)sG8|dIHR1!d;;DA zZXKz1 zXI=?Nl(Pf4(N6i#*mQN+F25huz37+uuH|D-+t?HO3)}Rt&DfXY{58}~JJ`@~XO|D4 z3VY4C({j!=z{cToV4LZ5`obdZJ>I^0Hv%x(y8D+8O<6F8p4S6lrJaX5#8I%0Z&eS_ zfU8kk4K!@ejKmYqRc<@!prZjT#%QO78spHK>h*`8f-1IW#)75mLxF-o?&bzvZl+5L zl`Y=5J#O6ADIb0Zo`lbO$rvigii{5`|CFts&q$)}n<^-*Q$ciw3B0MmE`?k1hw?vV z|NbVld@iaxV+YlqfO*=(PPgzq_Q<_Wk93j(TE`hSBe;=tG^KjMOTGKTa5EF26OwM{ zelFnmBUF2d&emzPoV8Q+87?g6W!#6S1 
z_ROBblHOH;PL(_3e70JyxWAngYM|`vRLRs)_H0A5Rp*wltO))$?}K;OTaN;epnoT> z6i=$_WU<{+Fw%`XUjP!WxAvTZtZU1oMZg~`Y7(RT^@h)seKB5aRzfCdE0m2DPi~!5 zziFCzX3?ZA?=3cNVm&XKp()12qy}bV6ul6z=S=X|ZHS_07IVu7sFC{23O9fRB(p#= zFQ`OC3k~2c3cO*uc<@XHR1$lor))5L>)%yV0-0RZnM<*jH-g8-))u~mvzT!+eIKBUU)kLm;s_!(?~Spqz7S~ zF{o6prx?b}GhhRZ(9{~$x>ct^ zOeEg^V&*hLP3KXnphsi0(40oOtS~W_#=149b&YcxSKA4D@VLfwu8bM`VzRq=M^DjK zsF@&@Vp{!XY367Ai|?2QnwS+$5bJ@_oDDgxznB%>ZpLzFgLeMfWahg4j2D*oAI9~_ zS}p;Jpj--4jS62fE4tm58KHv*?y6h4_=DX0LoXDzgEv`h&C7!Li5GKDOW_v?N~0oM7)sJ175UCn^iRKFR=lI=dzOxj=$y4tY%JVOoZ=MlbN1z=hPGYU z<4KxCk%oC1XaF`x0BAwaCI5oI$`a`eKWvM%e_01h%dk-wdn!-lcGy63CJ{rG(aA28 zqGFuO>c$w(xFw8R9XNg_U+1=#J*y}w&pVxAyKa@+Jfo}POW$8ulQ|*Iyp+)G2@QIw zR9-d~N@GJfDAh`{b}Fevz{zEAS?+D2*e@)g2=aHtZdXw5xM_m?9o|}JcTB&w71MT` zTj%+$BAlu;)e52OF*IC-uqThB+vIUOx!Ru8a94mP8ap0$mOQL9SU67lFF!5N$W=>X z_q4R=OAW<_2)`k1 zcZ`6H(FE{s;Tp;hASQ{ctZJk-+qV2VM%E44IV=yIB)G<1@CZ6W;!viirC42K1WS57t?R4WPgkIz|IXT1+4r=8V2A;)vdO@J4yBB&h_v zAAG@LeA?;p8Lud88nm5Eu#{#a{I$nV^iq@jA6`*7Wa(ojdn@l|{!|EcPnv0X5&>Nh zQGe1s@8(JE@$4%oO>7TYw1>fuo^)Om@6@z= zonLK?_8$Mceuw3j=t?)bp-*ibd)&s>4Z1({%rjN*a$(cSRXe7yQn3c7zjKI76t|6|;o7njYM@WpEFN1h)I zlT39*2Q@pONP&b1Iscsaw(ARP|F>m+<^MgIfBA1&n9Bc-%s*|7ya0XJq)`^P2wYKq zjA@aU^F+YnbQT};Q?>s{!Z@FDVHod7QFBUE58#xL75i14$CFbcJ=){gzb zB_^*|RHII|BD@gi!8{Zyvb@wqp{(Exw_%gy=$s)u(D)!-gOKjZ<7|m$5wlW|P$|kglgjS?-YfXuR@KuMPq=?8(e553 zf|d{PXSce7Zxka)Yr4y0MZG4wPH2}NM-qir7TxwrA{ zxlz&p>#(j*ph5^ngr*u`0plHe&8lr7kDik04IrH(ln$g5duf;S#b(-$P~?Glzn%&d zNA-#6CWRwh)a=S=CH~ANF-0fB4;TEi888ngkJ{*ZSv7uQmn#$BrH3x=Ef4*k3f<8{ z2+>-)11`9vCoS`&jQYEz2UpeKM>7wKprcjZtu1_jx<;hIUdYt6@yMe{j9ysM8;)LR zFTK3Cygm4?d`p%s)!`Tcm3VfPgzbaFCD){-G*H^EBt-||81seWnjsv!kp0(E8t|(0 z#DyN&t7AZ#o&uXhDK1R)>q*TxWiw9lx5cRpvvy(n!jW>1PvUoSVn*-71XC9xJR%L9 z<4(UMVXX$fK7hh`dsIb#(aJCMTDe`E;jeA{`3n=RJUc1*7md(w3hmfMlyFHw2jT@F zUJ~>YcgvaA7`;d+<@V$)KKJR29dYwTtD|*;?Yg+~t7Nb_^O#v6paklPi#*g*YfR#I zt^r-C0a}1g16+dd8NS2TE^;A|*$l+*dUFKGU0z>1zRsu38MHI{i!phTL;Sp5oaV0~ 
zelB^1g|#P*#C_}?Mp4&m5I=tr8o6FW@3WM7pJl#>W89t~{=3I`izJ$Or1w{VFmWjrig zToWf+THm~TbKkr8D)>*r5`(#J&$F)TB5RoP7}h+np5eYKp$BKaE!t}(;ia*C>snh> zRMrZX{R>OuFZNhf5em}i%uA0H zcK$a-WQ3gkaOSZ~`8Iax2;Lk8XDllWRd#=a;LHNU!Z~}w!v2g&I)ABDDwLcc{_d~A z59TgS?b2mE$%wzvBftE&)a6UONK5;jIk~(aVOVgA&g`X0jHcl9oXVwU$f4Y@?Bpq3 zDm|y*^c?%3{8}D3u6bKw%lShpjk3xbu|)ige3!iGYI=N7df}Nz>20&{kqf=3&`{h*Ye>uCn8ts+q3GT^X$4|7p z<(`C_II~I>V`Wy+mP-tz*e8xm^yTP2K2hl>_qpBW9}u1T1K@r31Sd$05sjuC0#n_wwpABk6-HDc6x(L^;mA2 z9pZ^p0 z+u2#ST@}b$w;iu0cs;Cl{9uLKZa%t{+GbKWFJ-Op(5Wn$Ggn#2)NL2SdYRv}@uGAQgs`0o`$41h8KZQF8V7Yl*l&%Auy3u{FN1UggU=L< z?_Lig)3d$-2&#gV&^1?yeeQQeD~YByfh*C=MVEj>&)wO5dOlw&U^Sexw_KE3??XTH z`@~72;T|2Ga@ylQR7c&s$j!3kEBi)*Kk1ew|M2>_ZMek9L{SuzS#XbK9v7&7bba#> zH8-Nb!M@J{m`oZj`~wczeZhYw=a#SP>RU~GMCKCd&p6p2G!l|8&z;T71tX;(xG~T9 z0OJIKLK2khAMU53T~DdsOzv=8Out$n6_!qQL43G~2c{S!szFAQv*61E%svn<<}}g- z+G-{MB}Q>2%{IA?XluwmCS#>77=V*v!aZ~Xkb2lW3kA-ntTDLMPG|-PqPZ8gGS6l; z4+s=kqQ)rom09;3MGbJdI7~d(T6lqjc87}MaI1>@@iuJP#(0?ApG?WA)N7q|;7&?s zt5bONCRAXnI^I^2!+lz+^9;Y^ytBI=?Q&AMvR@Ao`e*hlsccqnnq~&ga_>Ywcpx!5 z67;TO3*mZ4b-pZLDn@AIP`(~TF;0ZGPm6zG{|T)R)MDFmbNiWy?;-U+I3Pg?k=at% zTnB7CZwtxM@fz5om0m9iZTy1!*?Lml=lA1=QB)4p0pD=Dt7N#Uy54XMN{OxURb9PO zJEWZKmO@`@>WTm9t%aSZT?KstjHLIEp@KeMM$*n`RfL-!w>y6qY)hi5YbO2e@5*_fnn_<-!vt>lee&S451lwzTQ%TVh*pw$jXFR7AF@O>kM!ah=F{> zfxP;&I#P(oY9ZchRC@YAhIa&ebGu-_lsQJ$h2sXTi7if@(0qukHhyAowI}!HZo0(w zB+BZQ`H4&5M)LMkyz36Whm#pR@Xv`$a620|jxxP6DAqy;^9xRnM;2VD$EbmIgh2j< zQ2(K;x~OnEAf7znxFr$2OW?unyW?k!7kcy4mzYsfo)RcT)@iK`M+PJ~pw4`fFkp`5 z%D(uz@j_p8LUQO(w`q-XBL1YwDH%`jBy|N5=J4c4@%ojFPl})6b}qp!Jrwjl^R=j* z1GWEZsD1FnE2l}(IvuWjU!nZ&_{#A@XMWKkEn3h+h~69078)UKBv?+o@arW`3$&~Y zfl))}n2XdfK^B8l?OAmLcc?*hr#l6Qe*qse>Px$ww@)*EmM!B4YEzag;<&qcQ*TfF z(VN)$?&Kbep2=E0hgl}ikuaX%Z-;aHSM0*nIaTXX3hXrZO7u*ggBhuDv)o1yMaT8t zF-nU2ro{7nPBzf5=Y@0JiXu3piBWpa@m^;AGo0qHMXKk{@xIQ5l@pAft6Q@mNpJ$m zqM$PUIs@Laz#C(T2haSoO&#(1zg;-bWeH5!d`ror$Im58;I+REl4ct{XyRNu&M|ea zkuRvw7d>=FUp!7Kd=KZU554naq}C-z7#}!y1y1JeNfrIY1U=WGBwDbmGyFB2%mWxt 
zpyT|?2}XC<=hVTzy9TgGt2J&DFFVM_c00%>?BY@a z*!XS_*ktVl8b8wzu&LcuTut*GSNd*u%&Ag^EqdT18SnqGm2=P9+|&At)7-;0=N`yk zL)vTt+x*J$%kI_;Z3D>MZfqWbH*q$Lx|jiUiSOa)o)mZA`8Oi}t?~PBu`G!DZz&9% z?bc^vPYoWm$(VS251CSX@T|ta|1QoQpXMyb^Uhqx!#`D6mh-BC_*S9+6weS`1xsL^ z!o_n&g1b4bfe7b(M!Dbo&n^K1U6yVDO$V^2pQzl2i=XJ+hu^;ro_cW=1Q9#o}TY}Eo|``FbRxZx%C?>Brv*%*uj4!j52*0?L2l{m9UiW6tM_Y4n|+Aa<& z25V|2PV&!8n(DDVY9mZ7rjni5=qo5`a_DsLL+6cu42OBvpKbg$?ZYIf@mSCD*kaEL zfnm|{(>$4HWoAIyaC7e&|)6!&+aJ?S!D&9PvRcvE^RyjmeJ#E#cUDK z|6F$O!k%(qa|I#{ON2xaA-$tLY5Kw99tJPj;6+q852cQt=eX+< z+;xoa;RH8&;Z`Gc?>Mi#-5$L?d2~2wbB^gRi9-|TIpH>CS4a740%Y<$FWjbAPB6<& zwcyNTz^U+^ya)j`$BiDSkGuzoo2mlIoKY33dG19G*I#r257dHPoZ+tlHJ<>rm`qR| zKYD&WP)mRyP=uppzM}@5=VFm#+-q>XWF)LfayW8+{X&J~+{+3$xW>-+nt)wgO5hql zKhXr@8{bvv@0d8Bu=)9qm&}7L|~_k87iQHG7PUcko9Qf~WoG|S*i|3PUuMx|PkQTDUh~zxWvXz`M zH$qZ!W=3JE$~N(-8UA_`cEK*pZmTt6=eSp_u(FM0ca2-*Hpio*@Rpiv_ib6=EMT={ zQ_kxzP;PU)aM>=-@z;3t(l*ZvN4KvSKe=5nWb4pisJb{p3>58nje!zspi z8S?Iru|wM92ahHu?2OGf&R=`<^ma_x9q%jb$j@$zuY6x&z-FAw%s8(Z7ppV+toAp& zFPU-KW}HiAT+)p9Fyoc=KyL@=9e`>u>q`PN-@~aLDa=w_K6CmoG3DB`#O=m6yFopYW#{eZr6K<};l2Fiq|p z3H}Odu`TQ3T0r-YGaU-A7622;i71? zBsjph9*Ufv(?iEO#qRbi|Cr;+E-(4!l96`@vBtmd02>Vfup_KIzuToJ!y`#sVW6ca zlc_FVIKljGMI$>eKjt@vKb3dK$vlVYCrZQO36nOLdp+!|NS^Si5_*Z9Zkl9LcNlEi z-p+b>w{gotDJ&k$9_3i=B!cr$`vg8krn+j=W_o}&%AG5crYx0)hZ1(%ZiU`g z97`ad!JKl+`5^zCzjc)pbS!~5x570p&x0o`&m;A!Dpq(B#}nc|h9~b%cXOn30p=Fb z6Y$Mq!ltDaPUd(5=ls&+37sss4k(mG9}vFs#L2mo3#Fc#qI9ddSk98D#Z0@fQd0V0 zFiKSFqNe3_lZg_T2)~zTLCe0}oqwcM9(c5n3*8chqr?6I+s$sIa?057dMO>FvWYBr zCOtenj4UR$HEg9je9rvM!|baLmoq;;Z0d49DqL9(xpAJmJ^0UpFJdYW1m!>S@y-ty zT4{Q^{eYC`aDuyVe44Q7@y6)naCwNkSK=;39b9TioPgl3{N#|s#Bge-P61RnZSzj? 
z*DD)yc420Cq&&2zIf8D%l~H0v@4yVx!w51Bo##$3&ug;e9~>^1PY6~y0V`av2hQp* zk&j`giY(g2O2mf=*M(s(T$fI6^6~OWiTCiYq3rOH}c4qLL zgF90Aq9%X)f%3_aNH}kxSM(Rtz3kRv!7k45*WPD7d7s4$0Se(kUXpIVtGv8!XpI;NIZ4jOQYnE-DAO_#1_j@BA+SWQ<`#%C%GIVmmh~ zF0i`Y+Td%J5-Nj414Exm-XRkh%Z?5=fg*+^{N*ucy#LV7Q0 ziNa@Z03~nu0RCz;$`&3l%>z1WlQ!nKl+a4>RwI%J)=ea&A{mj)X$136#oZq-Y+apB z!jG>>CqtIWwl`NQ4<#uN^9CL>?;4?rpi6m}UoT3G@Tf?;OH$O%|2?#eSS?F%Wqv^j zU(CEb|6JHL61>e&@Gvi=w1>5Ysj0POuk{1+hSE?r`vG{tbT#`y#lMH?$Qw5GDKiZp zjR<0K7uTS=UGX1%k`_Bt@$H`|Y(Ib1jg(j!IDwQIcVc{h<5O$mTZtiJCr3yZG^C6v zZ*ctVDn;hY)HBJ?DaAaO^k+rw%#ruPQ>?o67>g^kcbe!=M#m26Tc!etnU^7 z>&%+3XvvXI1WfZ@8390u_3Sc74__$n{_gxeXDdxIT*|zB@Ei8pkDTzeBNoPJL75Al z5ig%jV1d_1#XQ^c3^~Y1;v|utmeP)qy}2CWAYrth|nX%)7ilF~V15=ed`+g3Qbp&61$Jn3fXoqe$Wbvy?z;A66! z$je*Ba*BWD4+~vu$X`-xr0=^r*0ZWkIKM?gc9y?R&u8~@JVC!Ka?R_!H8mO@-eQ~I z6S`&$muwQ73Rd>KE#7TtSLZafQU8|2ZMe050%34@3;s5$(1l}L1JAf) zz*fv1c2rNp>oghZad&Oa_`~M4PF$SiG0q?M;8qK$wkH0t7UvJM@G9V^{;+xObN;Zx z<+*XWt9imhdJ;g5$FEe$yC-j;HQ>20b?qZexo6GZK9v9k)v zt{YXAkTIH+z|{Hct?MbfFy3G5%Aep~?TxX1idRo+;57I7P3e5)J{|4u{_LL81H&1c za#DZEBmLss(y z-@_?-I@B<*_=bvQG<~>I82BG9qp6~~#qCX`8Q#>3hRu^%3MQSQe=J=xJqj{Un%9%m z#=<{o=>~RRS3G=g;k+L)tAE+1Q}Z+TRs#tabiF<^Pv(6FoY}5E6^|neESl# z6GG7>O+^ysmq>~~`$NJcvK}UAG~(*G9IdCyR8maxOGZ+#y)rX0$C#(tAacd??iw_H z-H=h+LW)BVDjQ%%@R5Ihx8OwrOzV@Sy+_x15BlElNp!*`4IJ2ddG20aqCJY=b^L?A zTmc<T~wnnk6R9n|8PYja%dPhEy$g^Pn|_9O?n z|13o)IM5YgMOgDMkCQYzE?dQ!j&mV<>gRjt?=@1Z~8iaQ=ExF=H1d zX;Z$j2AV1E)&$YS5Xrk5O2giSE!8+;j}6%gu6jTQOmMsLr!$4s}ifu zUt?smL!Ob%*Uc7rv0t5TnK4yZ0dWop7X+?h(ZemUmjw3c<`nEN9ARH1#la45UK@Ym z7z5kF_(NYPbUKggxLsIGKpxxdL7q6lOhV)MX4ZkkRyyp3Q#Q|n=J8(1yG}3zX;)L5 zl|~j$GggE(X>*K0rmlAR^yak}VQ=oEo6TDhKEO!l>I;uozaHZ&0Gj)V%`zuA&nA4z2Ainv9{21D8~JveJs&!g)M!$}Jou=38a6jy-e- zAMa!ah<0gTraYrN=RL=Aa`(s$lw;syL9k zx|KRn1u`K+uIhSCq0sMZTv{lUJ9g(I$iw3LkVjV$3g}KN4y5E!FM6ySnTeiJ+yzz1 zy9}nPG8e8qJBW}Fs~dA}g$!H073eqxwCb%bltOMbvxMwqi43jl@MiHV()c7sjWkT~ixq5s~q;2yCyp@GiL}(Es*N&-Vp7_2yk>i`IW`N~ak6mD8@KiMk64{-nmJdY+5iR8| 
z7z(NLzl&~4xzT;d8CeVo*wGJfm^J6Jcl{|%`uWLPppHIV6wxf!z{*?HN2MXs7SMzn&Y=tuY>aMo< z-47JDMwNm%Su2{ji@Q|{#w9yTX1nMZ%ii2)Bs}4R@WbO=(2~yb8P3^QPgSfFZBO3K z^^2MwAhU%L-T@*_fz*P6g~8?vuJ$FILH1|-!(T7_68$Xi{CZ()anckEkcDCq-_Mok zJ$8kZC+w1NvE4ESrAQ(E9_2O>whnX)q9uLm(5PkY(tGW76vI>|`hl>W@4w@}cktaC zn_(y4Tf@TZUl)#!zUg%o4AqlIIZC3ZE%7$t!{(qoT+@Zb6&fMk2m5;rrXP+oLT12N zv6|>RUew-hs!7sbJ3WA)k)pKUX0jIt+N-gtYcEOGW2yVbet&g~)fe>ORRYJQp zQww*MDElb{*-aoMARP8RY6S@hHLU`zB(li+Ka6p#G*w3|EQhqPAQ~}tvp1?`ca!qt zQ2t{ItKfPVDpASAn2`%?RSV_{?1f85b`jsD+Nki9{~`Je!10xGsw+d#`Z z%P7VQNWyz6INXQUnZr57BhZ1gLz46hS97EcYw?``3|@ItHiSz9>7&CjyMiCryHpIA z1)RR4^cR;RrS{j1q3v}U9MKoSaJ9^*`XhvwMVO)d41SonXGB(};a~{~H@bW07K0NiE4pV?z)=zFfm?#=x{Kpk^- zwYxjwtNe+b;ct8=b9=>ZXSq7UHnnA|qizwmQIHD%(=dB5K{YD|NnjLB_hvT9s8O?8 zzmqyqyCwdMokgV%Pj*k7?Py#*x0#(ShU^w0LW1El?1(T=Sr{na7EgY&(6__VtO!sduh z(TstplkK%Abm(0h;^mW{VfmgRlSX@DK&wM2Yk&xc+Yl$-4?}X^wK!$G<+*Y6SA}hR z*an?YfRVw~Ymvd@i^Lz&qMrdI-lq=OKXjUrxObPbuG>N620VzIuzz7=M6#>HnO;L= zvx%S#xx%oa5wJF%_;Y4L%8Ge98P}vwOy7!#;4?a zMgLy1`C*eJH4*XW8IR~0;1-UFQ0hgD_HmzDe&`XSeWpjVW5@dBzEEYcc8UJsS4)-v zFih5SXCy;!@Kyu-fk6k4L=nVV5(|qx=qtd{Yf4$DhFDdkYzWm=rCkLJ!S}-w zL98inNvkZ6l7*lgnT9dQi5Z^9b9uT!IMxY-s;lT{921&(;RmYvY}1w^T$M#p0YA6) zpjS%fF@0+7g>LoVLq$;U6uQEdbMc*ttX)m5IrLX_%qf$#iIg+NKsxGK;PocSeN2Ek(F29tyVd*X(V=_1=7wHCqRhHws|6_AhQmtn(0COc#-a?S-4^sllbfqeEkiJ1DJ8QtUyWl(grzDye zqpW&f1-4E-uYvg{4IIjh!vgCW{v%V*CwqqfI91Pu)LQKvoXXZowDU^eGvk}CDfC|P zM_xbwW7E%}Dgw>gyv7rmA~CpZCEKK2qQj$SqF2Y?_{YLv(?tBw|5zCQDRNlCxlhrm z`2$JCNFgYcQkoAxuWc-iy*;>vO(Lr%*~qRuH#hoG!S=8~Nf1>O> zVneyq+H~3}>@PF5!K^VAXu*byv=nb{@TwDbjkl8VqRleZW8NZK zQo1F=mN7Y{4XZRWHm}=6>3bdb#4moyi67%7vjgR})U7j#E+&21h)=6camOUl+IE9` z1v5W`>*S}~`uZr``oDm`9fiSRdzMu8UpoXIIIGeU=AM_m{ziWt2oYq0lo0sMA@wR22ivMu3T^Q|LP zW_c?_0QofbINA_6nLLg++LTi@!}s~&cSViI+pb1tX#BX1pRO5yi64J=5jh5s&|!Ug|Rix>%};#7?Vq0Crj_})h_sP}sJ2?W>D z#Tgl~_Px0;VMT;ZSF?NrqQQ52!e_|UI)_C>_$5OkwM zseWLL#|(!?kNo<*pyPaR8drvd2-wdMuiI`8w~Rz@SFn#qrxG3NHhM?6w>$qr`*Y)W zeXZCVS4KNq<2U|G;oQi)8!)Fc 
zv|p1a#+YT=@;o?mhGtD=mL$n|?%EPHId8f|2hh2;wq3G{PC;EZAqOLjq}zE5M4t({ zlT-EFrc&$gy%}QX3CM|H1}P@VY#fxOwVt;|&-7uk+IAaX+47uXccLk$^372dwGYW2 zU{MzbggYnKhA}Ofo;aOkv8l=lWUB!7jeFjl$$IK!r+M%`)#20tm?c-(iy27&U4zml zxZFVovi8x2-B?6J!^L8!9TJAtS*_QUgD^VC%DF{r2}kpa@K^q=aDJ3^W3qy_gG4Zm znNDT_Q#;2kU~6>&L6_{88m1%7fSnlblGOt7CM@UtA7oueuq)d_PVY!95m-*~s;>5U zVG))?p6%)ccsjrbnX7_d3VsMDV$B<@_XXl_EOrhGdLytCFw6v9U%wgWm!+Dsz%Wdh zpl6ugA-fI}sKz0CWoOr>s91cwamdD66^Fw=yr$rhQnwlrI2@8caWXi3@VCLCRqi>h zIOMP)EnP2IlOLWAa%_ys=eX^({mi0r=?kE8dsTj)7IGXag36Zg(fkPH-4Y&dmigNS zV~!8~%l5tvu+xcr{N0}mde4(Vg2P126$3RTu+5g*B`#}%Qq&W_<0sm-zN*|w%z^lH zS?9D`UOy7P=}>T%81`%*3`I(`Sx9Q3QSM19547Q9f72m&Es=TtE4Y8~Jc8xLkur7y zjIEcdDm`)OpEvbKZPNFFd(_~@xsn_1u&SGu>ekOwsD5&-gfFkz8y2sna!+sAel1l{ z`}tS7sGqi#ir(y}UdwQIvy>E)3=g28beH-^F39DaInm8)440_e(Cy>y!_h`=?NB%1 zjlpj=i>>IiZ5co2wNzZes5&%UYwQS(Bp^2~CGeBV5ixAsB`{ z4F1vOQu;17!4Tlt!QGMvG8}Ds9mOazFer^JSK97iVin89Qm0njU~H!_LMQ9zFL0*f zTI|*iU@MckhAeERzNtzxCB*}^j<#YOwBW~_rG|-29e(Bz{+yy|fh;383o3#{dJ2e8 z6m}Otb_|7-K_}xY;BFYUYD#u#bMRFfm|O)B1Hz;E;f>+MMb;iGvFYbtf zbg3Tl?n30aX{y7$#dfkUg&rk6v2>cyt=xw2!z`|O_GsU0NdbHsAC|r=bp++joD?)J z9>faj(u>L#Hb0|zTmPn`_^ePB4$L|d!OUQF<>>+akogMk!P*j)*6Y18sJsUI7O%m^ z_dHT4M&|gE(?ZS?EElh{N)>JoZ~13*>hppR+ter%E!xef1wU`;S7u**hAImbhS-yx ze#O&|Nb7EB!2CCK6w|w$(Ub_}Pka#n$F9f30poq0T{|Ns5o_t8I9A?xgfPxP(3!{$70r8Kp)lIsG~}^ zpq^z>SnJ)JgOXZHy?Qpuk{g*(Ut3R2FT}pCF-+?8+KgcW@uo7zND@Ah&;=D&on@KP zZGRx^K^B#;TJ@UODK{IQgTK%R79?h$NVjVHQ z@tQ(kQ!&1BC-!4v5fs@iQy6VZJJfop4jOpYVi1WinQJ^kJJKoaMdy;hdYtQ1eUTOD zN)4^v8gxOwDkSdEC4pl25Rib??erlJ`Ms zg(m_MwtrHzTtdi5J}F(>8+c0BhIQfxBVD@%7XSIY&WV_vjwcv1vgH0j1dIepBszP? 
zXJvceMLgMFEd4~rrBS~=?Ml5yJmrwHMmR0fjMlq|JsriZIvYv4y-PtV7o7oFWwF=g z<9Bow2cMn#9*R02cRe>aUtUAk#AtQ=qpS??9)GZ-xGhR2vEq>W$q@|9DJ2xC4R^^+ zyD=dc=Xy|J)rl2CDekPgxha>Ea411wg+niDdUPXUQ)r1asExQa&M9ri74iM1a{Dv= z%aw*p!_X!Cq6w`nS2~A(@H*lqC~LXoXBe6LIliV?+|hJSJnBB)T`Zn^#Ta38@tIM5 zB6JS(8IFN?2|r?x-a|hYBoQ4SmM@fE9XE9qhb~WXVfl^ALX%&&H00+)tv=!>7)8Am z%OH!p`icWhzZpke#dGWM`iO0Y+la*{=}Y*gT_oMasW2&TZ8dB@wkd(q#htE@)&{bB zN5CIWLG53;=qzTcfK|h0+z7*xNrCxu(lzB>WvMTdCEyc6+?^ny$VG`NBaBJY-dj zRd`fe@bBoDhm2%RBQeG_D&-NCL?IRq7C)NYA}F1D#T#ogKw)B?USM68XNRoxr!Amg z5ilCtm?GG?-9}4|ta{hvqjeto)txh$R8baRVQBtVa~n;~<%&;kOoGv)LsV#x79^=> zk$i6Cz%Ws0r7kEMy{LUc3WFVLbV2}Fy17S(8Z87hPo~4NZewd>JHDaxBb3)kGD-EL zk}>7E(tgu9F}>yNpmRw_k_uXVL%*f#D!}7%2vYyhoF@^+4Wy10B_`OY4CF9rgPhMr z*-Z}i2fqpkZJB_xIehIs;veoQ_H9YQ{~O(j{hNg6ek9(wt+3@4V{!)z`os6F$Sj>3 z9IgOAf@W~|-jihumsWh>`d$lMD=5uA;J9tt^XsZEbgx7h36I4MBHW?K)N@HT%OSNv zw^~GqtlL%*w6cYaaiGWh%#-?kKGx(o$D*N}+~#DbYl$X%T@s;o{lj<*6IRL02-CAN zdz}vOrF<}@-QX<%w}ayOO~p$hb~w+2Gn=aIFi~ z8!?DMnJa)#{ewnzcYgiOufL(xvsERt5DY!ln}$2k{n>4HB zkH%!p|Av^Uv4)lJr5T}8pPBVwX2N;muhd+y-OpbUkISwGgzk~>*+F#JjLrw?XDT2l z{Vk!NZXb;`?UAj8jOclnpOz`G`=UQ83X&s{UGLyJ#$JU)aX+Kfmh0zV{c zfal_p?-cvUhl{s>XjPAHKZGXQ{L8^;Mz4Jij1xP(O^auNu}suH9gNu?ybaV^kWf#8 z;;?wS6%KT`Fs!;9j>AJLM8v(sG3{P(&`$=cOG@PAi(wYp`V(Q6&-;Z@7Fd{;K4O?= zWh`ki!FWrUJ4i~OZfOl$e%Tx-U(*DNW@}TR>qH*?L`O|87f=hJ-)uqll%Ze#BA3mG zHRN?ir!s%G$uZn~_#hYy|3K#po<%?CVcb)n)v`Ea;Hs|A*Di~(kS}p9{-@2wz84k4 zW;=H@j0gyiG@*dVY1mY1Mt9fxODW59aN(g2iFr<+_LZCPc!2#YnjUC1{Z(4c94sGw zD1OP7;?|37&T_NX%J>8C%!Ir1EomAF@7!#&G+)tlJidKP@vO^?#emmU0%k3xW}@`K zfZ>Xr<0=Z^&wlCV{=T2I6pR%{a_Upq;p(O91z0_Q9eKw=;gR-g-3!MU8Uwq9u#lHA2w->kWKW_N- zl6fHr-riYu=i-tDa5p%$(iUJge(|>AIXEDGd0TPVp_GMO6{SjiI4P96cs&Z*(BqBL z^hvk=Z%|4WBi@`H zh+LI)bU_n~j?Wo?kzRdPFfQIMuWz$wJF|oF_ztB0L#~JtV)crqd)x*x_QdkD|5Zon z2IF1gN4Cyv*yKJucb@(*BCm8ZMjWLXNqM7<5;LhEHRza_jHP3JVw|4Qq?wT7knD5M zk4VBu_ojcaMoglem=j!fk~o9JzVj>`9$GOXoN3ivrc;_UmI%BF7T<Q1PPiJoqNx-Hzm3FCGBu=I`NWpyG~JC9czBkvY~^LNM3ha2Qc>z8 
zY}$j5f^@^0f08B2i@E#rj)=JKgE%+ma^y!bYU6>eo~^qzNz=&^50ozRiuv%7930Ul zX5zPZqM=;;iNu!!n=FL?p?hxbbnohh*(D8wl@C=}xkrdt`Lx4m36Iv@##8{kc+A%8 znsmKfTCGEN4QJ0{`7Tf1$$EkdThFP6SeF2qivM}ISdQFda36XB!ZyOPs1YEUU3Anx zhTV}k2ecsCO**uk{8tJ=#x0%UP7eLh6gc0MlVFsfI}ZDn3*q(Vhi=*vrAcDMNXOrY zKEjW=a&tDbEGv$-q~ge>^K!FNaGDi`O1#!5lZg`vOIix;9I!I#5?)+D2^bZMg3I87 z#l|=0VSbCMGXQs;F(h>Tu+KPk(FwoNp>iVpbaLC-^^Vm-la9RKnUAvRINYVfa;X4p zl#Ro}8gP83z&-jtPH*8Pb_A){kP_wqO|sUxs&$&r!h7TUuiF*?b#?L8(Qy5oJ7?7eJRR$Iuhx$M zop%J(uHUnKO(?kaiUs^z4NTY1#2Oh@>G~C$<9)k|gFoh0&>Reto*%S?$8;tD1`Km< zJ{g5+>(9$w?*xuHotlGNJK?QzVjvEi&9MWl$Xv)qte<5+w_c4R@4 zx^W0kPApilDcYJmYMh!z?S~{Ewl23&85SAF)>!K%N~A2*(f7sz9t|jXBTP{J8S0hq zh)d3~DD@d8$6xV?s`l+NWEIzJ_AC|<+G$s0cE`FRR&}uaqR}DNCc^yUQ&-KSJERj% zAQRMmDlb+X6;9Zkq3)!XR|6h347S(BIgOsV2Gr8$s+}g**H6};l@dcs~cw? zCPjs0k82w<&jWI1FpYVb0IMdgB*UW;J%W;~j*7mI_FcA?ToCbx|C zCAVDPH*CH`Zcr{2T*?0Gd8Th_pp4Zttda9uCcQhj&HWY;^T?Gh7sh4>lo*Fki?_vQ zt;THpfzyTb%H__JGgqw`qy2MSkv(BnpSA)?>`y0<-RV?$Y#)?cF!9_*e36Sc%zGEF zZ6VLv2?u((B`J6(+~}Uo2lrKd=>RQ8F^^vzU#@m7pLTNH9g)_MF|cV%Nyy3-FfreF z4!OeV;*I8*b{Pc8-BKQeM0dA2$!*bbl53)h@gsL;yd}83GlO82N%-H*Ws(WW0qqXn znOOxy6zqaHNpMG}xZ=ZCD_~{IPO%JLscLzMyNTVo8;=Zm{^(916K{oLEqbCc#9KLm z9Dj9RQHKbq0xuS(B!oZm^F^SP03qfNo@*c;CKh#coa1|;>X1K-u@!02$w0FWghq$vhY z{=3yn;*t3CgLm81cFb7rlO!I=?Lp+6$MpfpYj`;cBE53C;4&HPO*t(s>v9s<*M7Sx zBSG%Vyxf$8nyD;z4zh}RH{$NXioG28ta&-HFVp{$qu7~8x-gL3OpvOtVo)D*)Jv4C zu$(;vblHslJuJpO8KFEgmkA{WavbsodD5gI4ehzrk`KsD0btX%y~CaJ@HNRA*El5# za}PgFgJg}C+GGvKTV9BC%|viPiHqN+ORt>>_(*>hfZM5!F$rJB2pyp zB>wh`iUaXiE-h}l;JEFM$-0x#*Az9RT=Tqh0;%FWW>k4jJW%Xs*G61O3Ydj$WpT+mIj>_VOgH& zw#xF96)}YtFI^W2WmbgLbdLw()QXr;W`n7dx4SxflXlMx_as)tWO~bc1F|}uKISw9 zf#9BpMZpkB;6_$NdUvfAaj{fUlDo``zSvMLFeTNO&zfqT)NyzEzME|d)XEY7Q<1k4ZuS%5^OA*TR)T-F?v_a*jQR?S zUiBx)J79LCDk!@3G=qZCnZ?7xz_bfo^%++`G&MSwu0hdL4kuQJ(n!0)jde1`xho3Z zn7B(ipiv3sahzJ!>>8BOd}yG(KmO@w6OoXXZneEKCAv~A zPnpkV8Q94t>TU^obZ2T7c*9^(KyXrEZ|?7VaQC5{w<}V$;o9yEt=Bi{zgEtMK|-ox z#Ao`>=H@F?;|aAdILr!gj9PlQPA+qwu-FnDRUEU6Gau6U2Q24C)I=c#vutFd>WdZ9 
zl9^er`)ipDew&j@rGg*xvufBxqpAb%))8}f0|I<{qsjG0UTMzBHnVMliB^kua16b9NFIk3G;3&~Jf! z8Hmmk%@<$%R9UuFDCZ4IXEOc8CQ*x(9%Wv0*fa%^v+tI5pP~8Rb(ceeV;(DChV2hR z%*gRs`V_3I%d+~JBDsWq)$mzoR!v#wwrWauAL?-k*`%(9J&??UseT5r`J zO;^pvO&IUexGFv-(s8%Dagn8nGEkVQT{a~{>Ik1h@uaJ4m9a3L!CSGRGAOH0spu*8 z`k(2Q3w#Z9Mh2=ZWSEMXdp=*6D^+vP^dDZ=EJ7h(HnLZL()_AcNZ|v+KcjPg#-J>x zTFfWAkE|$g<|NOx#(s~$e*KXi+_er|Cm!%$Wb8nCDAw75j#klwbckzJJJ3Lyj=%XM z#XXU;0PBt7pV3(wTxu)Xgj~Nm1=bq8pKW?k(;QratY_W723`rVx?k&;HnJ^=-(-!! z=w6Kn+gMXPoHMaoyl&utAd8S7%b+=^yl= z^}ri!%S>}ma?5Dyio1RQQSa6fZpY06!9CA$Fs>*|H1$>_d@sGbfnr20Z>JcUNC3|% z8s%U?u}DjQd&MYQ3mk>$5r6!{ifv1j1!S%W+k79RPWY;d(UkmKC`NW-1`<&<5c7E< zS^ZmzmD(0E_bS}PD@Kp{wGfRh_ee1sQwA8HD9^~aT1;Ua(AX$FQ$Uo7O#-p^AM$q zoYu$`A>*8i0cggb7%;8d*bvH#L5?&u*{d6; z>_#(+xN&dpy;OUZPuW>(0L?wua}SnCo$K*{6FiF3m`3AoycBnQT7%a~)&n|xYzNH^ zFIgn$v=R>X_@b+erO09ym?a(~Jlyz>HkUIz20_6G4d4Z@jtDRduCO2X=5FTjY!lF! zhryQ0jjd<~870A;QY$4?wTWfrn9isj67GzUNC%ajo>)GmLG$p=OR~5KjD(j{#XEuz zbp{`^EC(l9jlP+5hDmqlj}L0k6TKxPjS=3G0VHitRVpa847YLOoRi)MMI2H_qh>ok zp~$hRuSUK%uqs}`H(^JB1ah^rSi)SY{<`B2B6v5#lwRS%X-MrNn~+%PknwBzjz$ zXliF6U;|<9il#X+(hbkx44cww-LBvO!Q6n;!emp4s#v*1m{--c$n-T&f3rp%PMNII zOY(kpz|FR<;uo5c`~8eWFKYb#2Q0lz{)P-YF>ZoJ=edb#Qc;r*gK$n;1FGBI*n~Om z2~VvE8JMQ%LI$a(=;URsrkG!$DLNW<62Y8SFR&vsI#^T}Kp>a+_fnyZR&5)>4WITV zay7&tMufto5W_A?3A6`JzvNs{9ac;6P8<$~# zPSM8XsGYG;I*?JrKF0jbKobAviytpN?83-P*QFlYj!yC?8doCaCF4!_I>oB0an&eR zJ_Uo9-kqO8;~JGF;Bjmc*~PQRQf=xj1H)Z}Sxr1Sy(`hU#?yOls@J##LZZmYW?Wn2 z;_gJ_vUv8zT9K^6MO5zOHLk;6mmT$5*19wlFRRwYympY(``BXKoL=h5p zaTLHyCd4TCG|gu|)uFTW3y4_0Vb zZ+OESoOU(oeQxgUPffcr{X{)CC{u3DGZ$*LE4gA#V>0au!mHP=0o*l4w`D(P>xHdI!_4KI*kA2`Y`y zJx;r-_APKa(?O?Qjo01yOj;4wX;%kNq+L~Tz{D3{joj8gl73=NLZ|d(b)E`8t)}xh zm0%T}N9`U~=se!(DJ{V3Jh&`ZC_QIL=MgZ2KLjMd-&wErfDWf4>7K_~>#=Msul3CL zrdrQJula6QTOKKXotH)V$9sL&UY^K0GgLK9b2gYC>U2^& zY~XZK^EEolMBK#mQn!8aH5ySxsLE!Pn8$7&>7o72kd^AIX|H~N&vK*VP}w;<0*8r{ z&|ahAgHCJBFZ5m9U)TbE#{Qpzj)>-u?I`5&}4RzkbJ5ui(PfXOKTigx z%PvE%Op$sImzA6;fhONlR+pVbmz^?6XR4=Meo>qE9!{s;!;kh^s-W{QN=Z#_sc|qS 
zR*_6VOiSA_dLF{vuAtF2@E)djC%SAVITH_`c@J?4G7A_@JX1b7v#QB zNwndomC5gMZ&iHsU3#rqtmv*Qk~3#y)8IY4+mv7#vp89G%?W*Juef^ni}GT+ESjzN z9^UQOLb6dRfU^~poGCA&$zW%pN!Qd;f*kJ;yvK%Ga^_4;!#M8J-O^=+L?1~_7ro+W zLY^?-vgI)Y7D_j!AUmb4nyo2}V??^RX(!p5oi!@#RwN?>n&5CgT{H3ZO}T(5B}fDi zoLMARm#H~pKs3nIoJJq1{>KD3fORrL|H+FpHBjy}p9pI+>AYmtPfEV@Hs>((?l~3t zNi;|b?Ku@4uIoc|E<(4cdK3IeptZX`(E97Y{_8%Z#X z5K+RDr*gzaPn}eQN|CAjic_1Cs8Z)ad=V`NUHd4--?e0pWbLo$X%G-y!jReYD7rQw zHhto%g|^#*k0`2KjT%zXVkG#eJZh4UI=^Th0Owq+e}s#l>OszfXVpyngQj_qtd_mG zM>nZ1Pm^<87_Sg`04%UE&^J{*cnDYCzwx`cJRT>H-Mg5R(EC~&zDx2d4z=!~4Y2gF zPU(*(nA2go)hS=ITpF?l%h^Oam_za34c!IVS=giAHDQP(ZS@fqw>?J1_pSx!fFgq0UtZqK5OA5 zVU_rh$0|M8466>QD09*sW*tfJq-2K;+MI%l6ZxK>E$;T= z?B%+o>;1B3OUdDjjO{d{|&vwzDHh)&b>&}xVR>=K16ZglB- zqh8yw@@qt;$Nj&@g7~}tMYZ{WOVYGXw_GJMot#mtiA-yXmAYk_U_eH@tS0SE_No52 z*DsB2OFLTWa*nBs%RfeX{x`VGM?&*EG;C)4QWY!K^wCsf<^wH^z3aZ0D_0j1?mY>`K+qcX0X?sf7v%Eb%rSeO^JR-r34XEg}B@p2&P|306RecJ0E@jfV^f=LJ`D)Eg%6zuGE_!0@gb9tWamj~BwH{P0lJ-fdVxrT> zb2aLx@mpRY)jq}K6h3lBz+yGG(zv6dxI(5&21|1IXtn7E&6qIf_z;}2dU8meHm5f^ zAz-dSN0v@nb@%U*rK)VBs9KFmJ~VRZsp&$depR(5<^m9z+oEI@FluURBE@{$q`G6`nM%(J9aA2Yzf0BAah5s-VI=+3 zk&L!tnTG0;?+ikUTX_}j96(G0wBa5re1Y-BJs7;@_O$fHB?wV5_;d6KgZH^{!)`kCCM%*wQ2 zhLz!y4!+t;=@hs#7$3NwvX^_3nhQ)M1Jk`@kkrf&3?AC_MSq}inZlcEj7i^r>}!k} z)UzU0dbCrILT2!ZPFM0F^Pm(h@6n*941l}y)a5*e z1*0$vo2*aDe#;jytwaOHVU6HS6Enk;f*nEK$#^AhXcQk6|8f(r%t6FR627jJHP2u1erF9&MT(^}sisvdU5ftR+=BxtSvl>+jYaX-2Am&o z*09llJID=N@kRlqn}RjUn)=mqZbo7(Qe$OuKX5l^b<;EE9NdGSc!}X zH#57?mod4Jzfnu|1j300aJ3$y6`;ygsn)ukPyjnJTgb}a*H#5E4oY*A@|g;pjV6GV zE@*;_D%FXMoyyACHg3ySsn(CFEV)v1H~||s{ZGsg$;dS&E9`4MlGUIEqtk&gAw08T zFcGW6}9j9yqAq%h~w86OIuuK&TMzxelC=r7vKN-;yHDNj$K~!(gvsg7ekuX z1fN8J;L)iTG|AR8u>p8_RL|tYZu_Q#*|~o3!;J@H4jVIC)ac){(F^J5mUQ$(jYlhB z0+tpu_%0hfpAM!vu7C7{>EL+be---yVKEe&#!=_Y2E*Rd)QQZdoI2GuJraABpFu8z0u(J+2F}^a8od2 zWOTIgP{S!|wT2#TA`FM{PRHi`*gG1JHH_}gPiXA#+GEGlp*cVF=Eg$}pCor`=zAF| z!v$|jeC@l7eZSxe3+iB^9Wiqfjek~aOD5$^RC&h&z*zIMvSWRYhWujK8&usvHrx3) 
z_~Lk9Vlp~yH5`xcKUzF%D>f-3=c=ZUV_+&;9N+(Y#VyZp$(>|l2A`7OJaI`eNXm#i z_ol~G(Cs+CCphK@;`@(Ksm0yk>`P8gpd;I{2pfs2=*H?S1opiC`Jqm|vMLro%xOtE zDJ)qOZO)tlNZ#N&TTZL(;a?AT95!P`UPc$zA)gfYz(q|*B30nvuD#SJ(!=#;s=F~J z3ku=S6Ro#~0XECOrGlZ9col3mZ2;ob%pY?qn2dGB1U`n`!mTc2y~WaF!NKua(0QZMWD{%^a4JierhH zlGUUlx2656Aa}BuR#X&kh!g=yW~-+m%ObO|^y!6s1T?$DuWWtU3io3YF2eZ=v*K8Mm!?4{IJj@}}YiPxGB8E8ltk6Tx@>@F~W3 zp1)qcLkZ*6_|EgqN|r=R40ooqXr{8NTx#&v(9Y z-P3#r(e)(NcdmXS_|82~F}`#4dil;*SLZudpJ~2hK(ESo+!CB3-?@6dd?$mCdcJe@ z>Et_CXZX&f=R2Q&^V59i$;)?M@I>&PKYEJsofoW^?~v%Un!fXbGtGAl=vDcSTY^*M zJ1U8Lw?55xp1geLg--yy#5x9Rqq*zT=kQ6#34J*2{M?_^9VQFFKuk=S3O5^E;mJTzAXUeCNr_ zcV7HN@SVz2jPJa7y?p0OtMi=~pJ~2hK(ESo+!CB3-+A$R`A!BO^?c{Wr<3ozIKy}D z@O)?F_NV#Ilb7$j@4WooPxGB8FW-6T6Tx>r^AzJdFI_L+nO&Xly!1@-9Rqq*zT=kQ z6#33e*UNV@_^9VQFFl=n=cO6GbGzp|&pP@v-+A)#otHfkeCM-IF~0M%_41uBtj>2{ zcBc7`0lg~UaZ7NDeCK8B7F=h#z> z@4S4yeCPA4^PQKUX})7XugZ7a5}YF6dHH(zP6i+KeCOq-lkdDd!*?b;-|7DF(|qU2 z%XeP!MDU%@J;nIWE7r?*KDRpGdBvILI|lTse8(-pDe|3Hte5X(@KMiqUU544&MPu} zC-!{jUqAXZ-+A)#ogaT9_|E5_VtnVv*UNW~tL-Z+Sp)8^2uC^l+e2c`9Y?%`FDW5n)xJv>rtqV5erAhUhFT zhnHI%_EgTLgVV#o7wy@UeQa?JV(4bacTSdDGf0XZ0ZiI}z#V3HmpHW!0K(0qIt^T=gF-skAF4a<*C&- zJm@?q9IAjQe`gQc+oY$~H6M^!YOtx|R!i|_&mu0l_h}aKG>bSj7V+i(n^^=e z|G2P-XP*`hF>+8vI~s5ZO}{P%@!Ot3-1fPr8N|~J;?x+#@&B6{1TX)%Fo-K3Uk0J+ z*To=iatz|uFBOLdtn@6M?2^j0RuF4r-19S?{gJDOMP>6AwIEelEY&D)yF520ZM%mS zC4BlO5h(ekbFI8mn14Mg6JN^%Y0py{MYLg+Yz)VnMuz(-aMv<2>^hsG5FhPTlWA&! 
z^$z!j(QpeToeKOvi1^dW|9#p0{BtPk)LI&la@=0(RMBl}R<)N<(6)yWg_kgZ+QR67 zSwFHZnkW@@pDDta7HwAQDK&~PFox1HU9_O3pP!jUvMV-HWLnj*B9y+%b2GBOP>}D| zC+|EhD-7LVid{JVsapfrQi!5Y2lz&>1KN}iq5N41F*n^;l1>qWT^v#J_Zt&!Z%N`ZT{^!~I)@}i*7 zs0=vRdi1j6&NA&(qK$Ie*idtHEyjFQ#aP)edT&*MPhX^xQ*G+}a~v!~<%xL5 zSBqO-YxTKYi8J9FOCq2V484K_+nB)3Tbg-K39DI6EPOY7!f#GpAFwlm*D?X#g(1BN zR_%~ghS!p|#25Wpap*lP__-i+GaIc zFL=#Jc)_(;U4~671YK&4U-c`&Ahpn|*!ER2tLxDcvO%cmlCy4z+Dr>|-NntM*7jEQ z*0!+q+V1Auh9=e3)7`WEMn`&ujlst{!sbI^6L++2XjA9#Qh%v?WAITx78RNoWMe#i zA1lpJ>j+gb-f<6=C=a<#6K&zZ0oV7}zBS1aXY<%JW`b%@Zwzb<^@vpy%$0_1z#+{2 zBiBa1h^~7i{D)875QNP^v;>u6W@{zcIp(LR+O#aCTpnDqGE;0ScL#5^;rK~(Q!8!RTHv=)xozukq14Sn^MzHD|0Hn&O{#r` z)!Oe+jcC|nhkC(GDvSqg)Vup?r(Te1%l3kM zb2s%tR*pxqqi7J3Y$-XjDB928v1(HWk&zl~%=SB8dYjC8tre)^UL>2g790qr+wsra zR`5^Ws(!~M74O1uyW4R_u0!S9^UwVO|J(RVs{bMc%u&GXj>G%rk=oL?SGQo?`nBTD z*RTN%v}6NnOm+jN4}AZ1Y{2c+{ReQi|5{IuMV%8_1xtKk$MIH3w8^1pc%=%YzOp3w zhvdoF2+=2=44j49vMSjZ+}Fu2600?lY*eWoZf##&&AbM5Y&G*Biu?-?L!6p>RQ$$O z&!h3RBfD8pq{ocnkx3szryjc~KO)beRnm5C+#HAw;v4>i;%|~~f61@hDSf0h(XA$-fF>NAPPaNb zP;RSTes!=iSD8uA$$?bQs;N`z^eo4S6V*won{=&dPlypuMe`b63`8G+gc{8fR(!Xx~mwI)l3S6S@2&K=2K}=~2gWbL91Q!_yUB60u2yM#m?-UZ zIc&NX;YL;LYpK91AXt0ZI=!W?V0)=$_)ASiVm8LSwSnXFdIR`q@CcgDA zibFKXYWl^_p^?WK>-p#Czmvs%yKCz$8L?j6w_V$vZ)q|`as|RO_*PRVLwJjOgcPCH zL=80rwo7+4Hn7%W9B;H3U#(dT7PMvfcvJC4CQDbv_0g=v&wsFZ?hl!pVGq{4$z$9L zwipxnE42sGl@9*4I_=w}A6DWIp4#XmZgl%Mif8-f@0gLmVbe{=FaAbxZ#3hD$F$gX zaCvUo|J&GjkN1CYyZJv96mn3C&(PweT`SG@QQ2Z~UzjhF55q()K4qX-XGeKz+%QRm zw(!{ZkmJL_{hc@-Xp`p$t8a&S5|B6C-sy$zT}gK<*NMy6d$}tWv{O`#p7)Kacmr$h zC7#&g_GsAdoFuxQTEekz=)ZI?AoOBne*grZ}@1uuaUU~{@?t}M1{GOyyA zNPAGkJzKi^qa|J*-M%L=rLEr+SEhPud=Zm9sLd>mgtIB9#^kIyu1aYmZxW|Xc#gRu z5Q=(V!j?(L2?e~z`{2)txnf$d3Eq%b6jE`(a;l=)LP^J(l)gJ}!9eLbscvTNL0Qc` zG|x&6WmBLvDgzv?B}N4tpJNn$XEvNb;h+5*t&z@21g@G%$brIN!I~A-ETsdP-vsUt zG_bCx(!lUnOj6TE>Z{sSPKtt4))>*r1#?Ugoy_)BqI9-?vz|(~UsEW=Z-0Gpb6k2s zcW?Zzg<@Mo54qrL&DFLlEhc0Ty82I$W~imzMd95gZ@nUVA!8Y(!Pn&QLMcpbv|1C* 
zo46^w=6CvqX~MGDxK)3bgr8drZ=*>wPYKD{Xqjdv-YcS}lnjF+>6W6!WEsOtq?Bp# zut#r;D?7XUc4*nmmnX4lHgI*Z=LX_Wf4jIN!hKcl^t)6o_7Fwtj~;y5>|iwmP)|v; z1=9%{-ufkXtC~!0@Ih(PAsS6uakja&&TO*8reCb-XJV~jj6@r~;TMmi(Ou%rfXN=U z^G4_Ef$Y~!9`U};u00n*P*V4Yjf>ST`oS()TG~ijcrD7k@$g?4dtZ{Zx*9I-Nv>`D zu1#G-FKoDy7iL$|8(2R9sX2$$8VmaB)~?Nwnjf76Qt!O(H-hlO$Ar{$&+TIC#aDKA zZHs^Bp<+kmI#a>EgMZ1iGLmv;L~ncTIw(SI{T#w++aQ$35TVv~ZZI7EUh^S-`+HI8 zMNPRdc>U|bN1I+p?}kCOKyvu{(4ZqIn;kiM<4_jW3`C24&@;W(;l9B(h!H=wHBo;~VrMkft4Q|%p zmSpgT$YzHSaP7zpR;yi`5=An2I0=YYO9z8Q9vB5RV`n-ESz89Rl{i zN`Ek#52tUrl_vFK-FtIOec^UEkaL2EZT8hQi)2UlRpS?ZBy7o}r2Lz4eWC~twFSWcdUTa=nDDl@heL;Mi8yp-S)4vt=(5KrXw9XuYv1XkTVvD9d zg;&nhN;d5j7?TM$^YB%*CGXdl^D$|Yd0YJLMU1n@tRYpTuc09&tp}~&C0d6Cktzhu zT7&DL1>w3vqp>9|(h{8!yHKQPR^A$1pZQf9jZ~Fz2fw1Gw+1utretiYglX)X_%Ttw zG;zK1XkxuOl)yM9K1ej3X2B*_Y>)Mg^Y3+zvr!8eXEOy-KQ!Z;x`3sZW^UQSl3P zNz^ox%=~s3f|{My!(S-9RZaret?{a^x9NqXMY0l@@YDZfp0D@Z(lvDXjHZ?muBoxQ z7yQ2=9LqYGljX_p3Lu17;l={=?}7ND?-ch$5=Hn)Fn-Wq(}ZZ^Vse?8EHXma=LJAhzoJgx0;6-2fyx% zU$IntW*Y2}=)rR4F9OKhfbcVySb%<2HK{bC*+xl3SteC$ZQ4M_q+*R~{o-!@wcgSmViW7M%6B29U2>C~S_O{k>xAYZt1mnj412)91YR3q)giSQ-{L3kk-W5U%6!)i;^UfdwAg1+bXo%}d$UezQGy$*M#{tW)zw6JxalS9c;0RHpb9FzJ zUTU5B(CNd@;0@usg~`AB?BSr)5`}Gt!nOnDmaXN^uo!lg_|g)NzBiVrA>t6c(T%q= z5-9udA=|-(5Rx7IN()-IE3)czVMpG^TiEo~jZ9m-cIBbSqmsD8qWq%Vs8XTjwDW)? zD-3WxQ^4=PiM`U9Xh|)lPPldk&R=w@60Fb7;g59)QX@s2r!_~00K;-fFVj^;Qjvo&vTX;jcpuy z-}faSd^9uXS^xX;{GZN{dw$F7wl+1bHh;@mVC5T+()pE-87r|V;LIXnFtNEaY+)V> zQmyV(ewztPd}Q8(dr(qBhLdybvJOONXX?S_n1}gTkphSj@0(|vs!efJ%VFwNt*xQ6 z4KpR(SxGx00I04_*-Mdlr0tF^fEznJ`*at%W|{1WHl(!2$xq17v-ztU*T$O zLQE6ipI?l^AnjWiApb4)pun1${6n? 
zvq!t5;+oFY&q%20uwJQE$U)7|n?ep_{6eab{}O^g3VFxpy+Zzl=$#LD4ODt?w_{1l zrzvY1jI!Nw_%W9pI9egZa@hlREoMO~1Nl`&i;+TX*PErSV1+Zt!VHc;4h%e8AsyXuGe4^?S-)_#nmJ+BQXkVGd z>ub(um#KBazsc4vvCKR9r&IQFGVVy8S_fv|k%CYe$Ic#IHp)y1J1p&!^mLhD9sVX) za=q;5tre2G`Vc@yKYy)>$vv+D-IcG$eZYznfZbwu#&*qEcjCA$z8Y4Si87wVuxMKM z!a4ymN|LEG*%9p}S}f73zb@o=`~RYs{fs<{qEHvl0FeU1Tjjt$$_p?kTS0%!OB){) zCb}DD?I52_ALeSC=`SK3sk`ZOr^!5R46`Q^jp#8+HZ0G_uoQ9nBVhxP@goLHZ7$=DxNaVvZj@nr2~oG^8Bor~vML75chTi@CvLB7l> zzA;cK|G(*MjLoY(Dyg4!Gke*w4`WxNROj!5iCByG7yNlebG85%HJhN>K>pY=;RP7H ztgj;GU!?&Hw`Ts}Vv9ex*lDcA;A1I=F0?YFS5cGPwYyV76bM?Nm$Z^0Y>+cM%DKN~ zdSzND*l8^)G$9l@0*r0{EMHJ&mXoe&6D%uh;u)oz@}y1Nr4_1CN9oJ->Ms)JFE4!@ zZ&i~Hf5k5;PU;Po#WukAFB<7uU33x`W7BO5zIbI!Z!xLMN#8fiv!rN2oY*@N#~0Q8_Tdh(VKG5Gs1c0|N1v zmFXxI8ZSnCF4^`^*@Sxy6OJiNCq8kAuFV}Gyhj7E`7~Z)povoS z$(x9oSp>@OtfA_>lStF*7}`eOe+eAlmOGh}Ze-S>X{RV4lAx?u*R(jGQ|@D3+?0E9 z=m}FhaKuXOCW>a9S(-lP=2E%tEt|)wsvV`jbD6i2lh{4z?2;^>Z~g;5JDWgSg{F$M zM~7)Gz1D=QSqRdd$6!{L@Krk+`AN#Yve8Uy*D6M(Rc~*$pxpS<=u@4+<_b!cWs=4U zo1zeI*Qafu2_y$Dx2`~11KQ0gUjB-E*^+o#ySnr;Dhn@L98^u?e#<>5Vqdk_=X?5B z(fOjTPH9w#m~kqLAe3cP%`S;XZPTHg7HrXiP z12o2L?^bYKlFmXFGU6)eu%K*jJ6 z1)AJF`+FQ>omML+MWl+QjZB9YD4nxYid+1m=A%9N8k5|Xo0LhX7o9z?ykb7+omhcB zBE8CL&9EdL*igoU(yM)PcTT$*u0zR@wI#Jq8FW}6rc|SMnNn@LjZ;kTNT;Y(tYeiT ztTbJfvjeK=*waObO@~xv*SU1sa;tte`03kI!rQHFXt8jqrK_L`juJIL6?z=At>Bz% zYehL>@`p=5JmvdexA!M_pQx%kF@qjo_j*U^8wF=1pzAOA?VC)>@dat5j@X!9MM_<1 zrVwZHiR5I!ieB-rd0xY5sv3>;WiIm?%L}F5^(x{uLN48xbfyF$WHihE7Rq7CdKJ9h{(hNl90t=bD_-|CRyVesZ2)J0jOUj{_mzTP988atg7eV$ zH!cY-s~A$kEDRyh;1dUv7K8?06aSQw?Z6<9S!7BtbDk%2$H-UAHgzHq@pM}5 zWFk03*;tI~3Mw*_0c6Ho=`?rZpVk8EESYE*CA-nVVX`{^V;%+JVy_@fikdAGFuz+& zY-`hJi=?kCx=Hbe+*5F3NcAW5kWG(iFJA|TG#)Zh8(i_HpX+sHv80B&vJ{3VDR#%M zAsRDbg)4nLgOkP!G6OAj+nM%o`BYh+S`p=z1?NQ{gSHpLd%#+l|bYIXh$YHdebkl8SP3m@p&^Wx;T` zUm|vC2!VNMImeC!S1ff2dwFe+u$L@&bu`EsZ_AC9f5P}eC1DKXb$%?r4p&MkGda}( zNF(s+1f|M*%iLj*6MqPWj5)6T_#j-vLi*75WJMCG4Fr;*W#WnF5s4-QxKTQ`Q=DT^ z=O1{vr}WdG7|-CzC;u%I=i#s&O!GoJ?|qh`+JP0F<(Fs7{2}vGRHLC4!9@`LCsqWz 
zDzl2gAi)$73o-MD#3%U-k973tHc5&TZV`WUU=LEq!3wu5L=LRd*BlrBMUpiNObxeE z=kEceikjoBdYU=!B#G!D&Sf~5{64mx9AoHQ5&fMD*D@#-T`MnolJ}CkFa|=E_z5rDna+;VfAXi4=UM@K6y!LX z&4hWa$CfL5O+>t)H#omCAxeiHp!mdnVyq_oyxb@4xKEtgXlXh|%}Ogl446vWF<|~goW5%hEsB&40s0$M*$79Oef4ZP%K-M9+Y3gM*EBv z{ZU<>IldByIg;ZmQZWp}#(MfW?o~-r4rPT*F3a!MYRyCLM2bJZVl_a_8)m0bl=s(z zE}4}#iXCq@;Nr-cpBP!F~%8BAdKx83Ke?(LDd=+V`|w#xm` zoV=r?!P%zIntm{lKbX_e+57#c@tl~5GC3A1C<_rLqmi5nVGLjL*On_ocl8(OQ{`4# z(Er()Um1UtRV)07BT(To@TUVI=`$r{60}-Q$D)A>XN@wL#vzIFwWt7b5M4hmz%VF_ zaLRuVf>}+MS`4%48>SfzgGAx}6Cld67{i+C7E|(RA4XUxso4engIaPBBhI0{mJMe8 z30mHmX%xpT!f&b30=)ckjF;1AgO`>e+woZKo)cV=Al=EgL{8*9hxLV?Wj*;%yei@|u>xM}8l^E&1 zde%r+_=-omuNn_r72VtyEPl2N8o$4AN(ZfHXR}hCpu(AjE?G$sfBrz)RT_+tfz>ZX zj`bocIl!Sv527aAz910cJpR(too%HEM=NU!B zuDAZB9{OfzrY=k^C!!ME)Jg|9W*0~6*9RLbGDmn9`xtZ9+%9wIFlZm%K{z2HO#Xwl z!__ujbms$QbT)41JV!VdTZ1mwZqBuH$&5BeGM?GTujzoat>E{y%f{79hQw+ zbeg@aj>c>LHiK--$@`_Kw(XU*Q{4s1J+#wajwa{8ur9onP2PMx$<7v|^b-$BU0ZQD#K`r}POf5j9=woB4H<5>`Sw}gA&M^4u9?_m*KUYe#Z zpP)FT`p(E-OjTC{OsJlkA!LEs&%V;JF~T{DZKWx`DAa*ZwOmTF;-09G-N}Z zim(HB8x=ovg4v);kXYCc>AND-jwUSM%K0bw7eD0R8&Z=QYvrzGo%)9OmV~~(X!N9o zf@e!H9PMm_xe3IlE5blAL&(oO-Od~CcgPr(l1XYy#KOuMDj)uxpC$`ek?PYFk$#Oc zKJzz~zS&#`msA(yb^4B?hvqBJm%B>eA^pl6{L4KEZCAs+cPOJ++WNK4L#mXizqOZE zbufXQ5|vuI_B0sC`$-y(vs0+#z?T{5OryNojS}6kIf!dIR3?mkea^<>y5l6W!g4VC z1!eOw)C6n{R~T}V5P@?UM0A>^wd4pK(*#%Q3g@fkG!BTd9`op#1J z+r0(c;tt?DuVrhRtmiAOQ-Ce5w5}jtoOsQGQ^ZCH9|4zW!nIDefFDKKt(xu^79 zLv3PJz6iWa-7-HiJlc1IEOBUV^nvF~+#jc==U0Z-mPs#u3R+WR%%QcD6En;rMQbru zoU4Y`pEi`ByVgwYb@(^U^Z4d>{2ow>e9D5%Q@>gn&ayf?}lIz1w_$=l1VFYt=QtQvduV+O%)epja~Idx@> zK{Ij(MGky_qgSmR%#AQt@gb3CEb1 zh_bCB%b;gS+9h`1Q}!PD|3E;!Orh9{>^yntcuq?%o|mW068BoTC?mhyjXF;{72a%< zAZ!|MOw=ao`{VOy8&~Bfq@?4#IH)K6BEeGMgYeuqjbi`^;f$LLeT^z+wd(ugcXqCb zzHvd&R^hA(O@~u2t>w^J{4QJZRnY*c`^rC(YmhdQT};HXYI#Z%Qocg?e@J!VPvH_G z*bWiLK88U=ujR8=d2bP~wN*r&0N!?vUo8Ux!^M6@WDv${TU}ZSw&*hX))39D~K~DlmDBa z^Uk956-Z5ME$zw0mf*rA9jI#qvP|{#ea$4vWKZ8Q5IOdNCE$n^I9GqSLn@t#(&+Zb 
zb*`v<&6|D4c&ZFiiwcW*m$?{?D9Wk}z?8UxMdkzhf<4s}>o?*y^1>w1UwKT>L)g6f z=?x~b!X|XM5c9CbFvWvz5u_cje{qRMg^96;NRSU|JVN)`-gyvhmWc(+a!LUiel*pM~5WieL=)H@2O z4-8Ug=K57BjH_*M;7U@r%@S-zOu@hg(3+i?;@E@}Q|+;s8Z~|-qq>FVICIxbIM7<7 zj$(?C6H{Q0Q5%G)miWgHBr-r*3k6qj#W%^Wd3N1o(kdads z&-UzE^&}<*J9Gmh0n`tV0`?T{I*zZie9^!Gp=^@@QlpZ24dwwAb zjMPPZukI{g9SvO?Y$ERe!AmU!c;BT2b&Ng1hAixYL7MT&i2Gy!!^4LFIe?Rh z9UZ{2sI%zATLS!5Q!{K^4nxaF{pNbixD%Q{B^xYA{cd9uwSh&s#!WxsaLflU3pTxw z5*t)J?rLgXr2pk}&5K&(gh1C>ZZ%@A=U~ylH}zeL^2^CdYNxnLPuTGKA}#$yiE2Ae zfwDx=Uf4*Rta{U)CYMJ$E)V)D>c$wgnkEMM_=@n(!=eadtR0LKqcrsl z#mb6^h|e^u7+3Cvq|BM{7CFiX-~@l-MYOXIkiTr=MC3OphNV0_n-9-X7uM2@?tbg* z>Po_=vx}ozHwtM0e$6Zos$r&-2fuR!>gy)bIrRDs_`R>Nn`&6TBs>`9E3d(dWrtWv z_{Cp0k=|bZhKck`qso;*?-LSwO38=1i{TXX(OJF}3;K5H4h-dLQv<*DB~4B8jwMws z$!NpSj)uS5b&fA+STLT$Mo{asZwFo-<2iiB5A5Qi3JdCYd3i*EDO*V^7}k1LbOhPp zEOhoP3*2i9fA%2$pt4mi#HjGz7&dd6BEJ^R?zX74k_UDK=T!`u=?rA9QD%*4xoQBj zwWMHn(~?ZRL8wggmOM@|y`#2{K^%|M87w@${&u#h<>tafHIB{FGhKt=aG&8|B8Fp9 zGTFL0Z6zF;++pd&9;?tu5}b!^59;Wu_yNpVab3`%0zRaQuVpQy=x^Lg7I=#OviCqb z`NtFVH=m|ji7XpU%@kfu?xPP9#2~vlQC&mxaGBsq!e=r1+l2jOXVtJadd7*pZyNgJ z=$lE=KXpl->PCc`0@KkuzR=ay@l8()Gttj?2CFJiDk9*`ZE#+HgNHalDi~EjH&+@* z^|Ct)yqn{Gr8|qKtG~<$>gajzG^&#Axfa`VShgR?Yw{S|(~jBjd1PrccR^QgMdqNJwlow~ zA`U-tCr9)H9`>dR+;QDjtXZFW-FWI1$Uqh=Au3cB(#(rKi=WYcvTy( z>c^Wd<#)UPXrPmTaTU?-%600(f72gO-kUnI&O0)}en7Jej|;?1`>4!#rW9wD$?vXh zkyAbt#k+l#J!m4L^k23uhn3-$<3|=)t@LKhq_ND*3f`Kj*}TORbB8KxJl<%VLTmsk)7Rg@sVTI%LjcJX&cXt`#?+UsP2cChuNLy7B@Sw2 z{4sO`s7W;w8$2pWBB)7{wEu3%q4Cv1>LdC*^d`<$ypwfyr|Mb|flQ$M zhK`Mdt4DW^b0-mun?|X`1swK`7rF5*Hd&traz;E71}&PgBD_4E%ZH$5hkO;V0-sia zk(Z=Z@Urvv=m1^V&5M=agOkJzUcXK+jT3ixBdJY5!d&9oFz1nRc#)bjsjym74sW&; zELin=5)MD3Nvl=u?5Wbq_cncF@tA<&*gTxNBW3Zmi>XVis9DiLwd_t?{vH#QPa)Qe zyj*bW)D|v%%1z-BHL4X1Gt2t!Gy3C<@VBI{_`2?ikgauf)OtEP?lV!)QvO!d{X~v; z%q{6hyMfIw=`TLjHV-VbydJR3qE&phj2^J8aW0QMcn)!gYuU{Z2OdD&MmQKN5|wN$ z+=VnU6_G^$|NSVq)HDdE`>GBv2$;n&`5O%bb(K;!uct8r3NchG@3Em^<@7kno*N4C z9Mlnf^Fo9}KLjVZgd){4++t3FOv!@wt6k>WrEANJUU~)$yeX`z(E 
z{HI62`%N!*ah8M`0Shi`lB58m`U^Z8cl4`?cAokWgg~fXj~3efQsfg6215-I2FtZ;&e_i6Xcb<(8C`@5bvgvn-J|Ty-|g?272)Qt1TPHg$>ILwpN)Q~pWqS}d{Tmm_lo%7lO5M$`Wiqd>@7q|NYb_$kE3_$&DR~Qzs5OC zW;w6G@D0?G1x`Jw;rJD5oOmh=#FA%&(PVvF#| zoPiZ29C-Mc1wSH+cxd97O=tO<%psB6b;pTxMq!Ixh?l8{8Ruh|N9T^{-X|A&yhmr1 zB@UtaRON@n@H9Hp=jG9dURdn)2bknY76O2zP2pGmg^g=Z49^3~OtH=o@MU*|)ShanV(wJ~RG8K?=fm#kS6W8#E(DM7AGz$Gp29P zE@-7801J>@v;_$vw4msQ7gKgZ3WPKGG#4|Z$7|PfzdyAm;La#6V7@F&r8N17C7VxN zhn3O|(+X_awp+F$7iZTm$-y0 z=G_7jYx<@`FpmlE%T?Zy)@_=;`F7<&SRM`D6tw(-xRy6}#d!qH&uR6A1ez2iO#%dj zKH$7MI4?`gSSe;L9mFSo;^QJZEMjgpu;!aL1sit2aAVtj(MyVpn)3NPx5pru?C`YX z$C+R`xrR`#?3khG>X+cgpE5o?voOiSGUDr%UWgz^Yt)5TXw(4Cz0mHo&2jGd zMvKatuOf?841*6ewjHvjL-fG+3l@iOT~No^7$sVpJcs$+PF=4pb8n1X6yO+n>I^KY zhDLmk!Rh_s5QG4waSpr*S9Vnci85!o}4k-gI=(cXP&xGCk6hIYcoFt9Id|HYg^FujTI#JM2rZ1Y9ohGwa|t-#KgJ z`rF>P{!Sy~y7nIA#<2DZh}EdbYP1auYtTwc3VA20nPmD&vn=NC-WqIsv2Cv7sbUX_ z#cc6K(5oiC*g!kYbUyN$+tbQK1VgZ6OusaXYK64Q)!PI!l`)ZZVAmg~mGjLo;QHg# zO5mzRSqWahT-HFpjP866r_@=T%PJO`DGCv1(q&B8VBgmf=RL>AO zh;$@eB3;iB-9#R=OwCGHk$}Qr}K7r;;Wcd*_ zxzZ;X~>{*ayEXt$TJ+3pTeAkobtRqi=Woc&EvLpGY=>otGRSn=8i;fT6 z@id$8=}`m>k$#w9$lh$(?1dQ8OQrrtxm98zif{wOWAgU@QW)Y?mW3?aW5_CpAv4Xo zvpz{sI4w??efyZW^Mhh09w!`1q_c}T^>a=94Zz$pT1zoNy_J z-}AWe{aRee>*7_@1rXwPcX}8ong)VUbG(9p#5uvCeW0l8(Fc`sD9@yXxN>So@}-}e zl3>0`zEweiPSX?5aV3A`fjGPz?q>F%+M~5v`!rkGYLs|~k?v@)04!z_Mi>jF1w{u) zli;rr#!kkxgi+Kt3-qD$w0N7KK6z_zPChMOBlm@W8*xC1mTcO(G%m9wGs>s#D$3kV zi*D}Z>u znUZ6IS6gXMVL)uP-bC07Q1)iaCj5CQ@p*KKa$=TOB&{VwZHkt8WOnwWUqgis5<5FCmSG!gp1pBdB*<<{ z01=|+5b(}_l9bMnhGB$A7{=`9bPxt*lOHNnVmL_(2$$R|lJ91~l;(|SM&mfX>`l~( z^hguBy$Ew_Q}##`Ib*_8S=2@oa(#Peu>8@|xKY|4rfY5)owYJFtVYTYNnVqBIgTyI zsK8HkW}DsN!kYqMo977r2!uP_*>)@I+9Wqxr>}A_2m}sgo6k^J(ef* zkUpFb|5nfNN0Jq(XOg2cSK^z&93Z-14=p#SBpx4@r$GR*n&X!tE% zYftU8F@p|k)C}|3n&UG*L^*8?m&UZeiBV;^^yT9&uM;^W8V@Ku_wwO%U!^UlafPh zy!n?H$#m0hk4wpkBIjg+2@EvQX0e-$5?k z!=$g25G{0!DP&u&a$Kiy^_z6hd^p_{ei=r+TJi`@ZLw#XmK3eiHEfe^(1XpSJW=5z zr#sglVzAb0YMomI)#e68sp9Ksp_3PSSksKn;%5PrcW)$tl 
zoA{{PL{*ScPfFvlO2DLS;DqNDrp>*e=9h#*3(CznF+WxLt@p4fbjir_F~) zC&@12Q*>Jqerf}JT$P(G zxoxM;qltLxJnG=PiFqk2MkM}N09Yj%X9x#D=fh)7;n%>biYLwSM4yStOhPRm{zTPD zz^L&akK&eH-yDfvahyF=ij}bl@j!XVr<>e2*CO4q{v9_yDarv`o_-~56 zk3ajH&|jrpilat$yZxU&)~^0&L05m0@oBDz7X4OGvfNQ8TBKp5#}!a~T;Qk)P)dv7 zY59A3$Ep?a6Wp(T-xuHa#ezF;rv079p(wk`#m)0p&fVZHQJddwhKI1h3A*-5>0}`d zG%dRJ)q{hB)pm|K346eU!=RVNlf=E@QHo=gegO1gdj1@`Mx8WYsCi=J2)5>{{M3rbX_s$46>r*?QHR3XWOq$vKyt>~W$J{uk~l?JIBzm6vz-^-0-P zlqrrn@!1xwaJ*E>_>5#yI~9xADz^zTMOEZt>$F6a3Syeo?x=T&WOClp_0YBdX+EHY zm*FTIrI!>5Cc$A_!=CF!yP*pfHSqGq!J;G5*cphxMe5-dqB&L8hilGhm?s$L@{UB# zxda2G+1%NjZE&Fm30R4ds?>am6QMIYPq@yUOA@XuHIwIE75r6HM3E01vI4{1Xv}UP=K@=v>Ob&g~CdYG{n)H5pZ&t$gI`0msIIAz2 zYwSp%d%@F9*-!W2!c6bkp1vQzMuppb$3a=2(8r#7XS@#+`K(K93fQ$Vbp6Q{^U>=c z*V)za1Mg5c9{u_2sT$~PrF?jpn|&}%KX>f)CCaqSn;vG`BPOQeNY2!pUgmB=9L2L> zwkKzPzAr}Z)Gt$^Ms1P@a;2XYH2!Q|w#Y3`x}uY*crK+43_*X+yIY%1F4h(a+v@#Q zHQq}X5Hu8Q;1d9G8zE}rP(<{%e;F)$EN5;qpl^hiZHAZ5X|xb8J&<)ZuMo1CL$}fH zG2E|~d!bW);+2h1(Lnl`Q1QcKr2W=hxtq=|#>CjARfCF$)G%}9E*J7z!T%-L*Iu7YgKNrT#l;M_ zfPbBVeVOC)zn^=pcUXefeP-^Jchv{6`~P3#Uf=j4+-rU8%(Uu*vPIlnW1MNkyhdd8(_C&OtZ-jU z(=sjKvF5gLnv$95GKmnVe`%_KoucNY_}X*h`SNGDTEFIwvseZF$WCf04R)4q;PnID zCP>E|OSC!_kO;XIEv4H#9d~pZuyMyR+l0Gg?pR;h_m=1l!$EiDsB|ZWMtR~7U0O*p zzy36F3&Sy`OzsTCPOm>`Q*+_TS#!tM*xk>KnUydfN~ABjDZ9JTl=O3Zs3($#}8UHu5UI=kbKl?RQS+RHyOUJTJF(H-(>DSemas zgxBqpeVzE++BgHKI}FQhC0Ff|;ifNJ!hd_@wcNqWXJrq1kGqp#Y!-1DrBF~Tjhi1;R#5h_ zDBQGzws24GrheTKTd@`-&M(#>ausoaL%o^}qQVCys4PSe=JO`bQ2-{Z%!t?d?=kii z($|a!xS^dF`gZZIWEJ#9aw~bnNEGWpU$T@g{ZlIazJ{1sR$&Z8hY20h7Szr7vVMCK z2jRS~9T9|a@{VNQ9{N+xV16M%Ssn=)u$%(8k*a?+7bQ8 z1%sNV)5ORNk{F_p(X?i02q&6zTDhq4t#9HD}*q6V*aS zXVEYeQpgJm{$Sgh?igP>)q}k4B^}APkzex>N8E$Jt3zjm>@IvNBd|8A-i=fF)!3>lT|UA*zaX}G_AJmn5kLOz=M>!7z{mlVoxrqe(mqjH)OPnUT-cZFMS zx-8F`A`^M?=7`uN=$x<1{5h;y#EBmVw~r|{v5(97wRUaJ4>(Vg_klSj(Bg73vAr?w z9bOjT@LkSwC4pUA=&U;B0-A?}e=)TahnLlDX)VJV@XXsaNtg1*q!VM_*)>^r+~UWC 
z;n`4AJ!|c7fzT!9PSh3$7Qu(fCMd1(C+!tS~d|0(%I3-kh$)Ji0hpC(bti(tStUgw1ICu1osy0)(&yWEsQpitFGerLKF`DF`iA_k=-hg0u)Ybhxl3sO?BLg`bwj|psRcfbDy z)AqG=<>Q2xt1W-=j>5eyF3wj?Te$Erm#_`_V`UZaYiTjtV& zx@xVr)NG!G75*q21o|}$4J#0g2(gfdbV;X*OCd<(L3LR(3OtB#^o*YS?qJOeU7Z&g zIux1cu#?=4Xa$CN&yd$~1gn3P@^|flV|g_a0y}a+p6QZRN+Lv=Hx3>TbygK$pj-vC z#JkOLPc+J9kot-*Sbj;GscyL+UFOLg{n*Id`O<1?zSK}T0208nyxa0}jLOeY^96g) zCMUaZvH1x!z0v!y_mgDNRyty?(JecJ)z7eSkY@HNy^|0$uOOp)`%V~TlhOPWa3?;+ zf98tQcRsIkd9>@Vf+bP!lZ&e>W~R$r#gJfG?WNZma$BOcF&CJg zuf(DbS@-oaY_}4W)2(97`Xp*Vqb&WbJx*n@fsCU0Nou5}m|RZ$gx)j9Jbub*ewt+< zuc*Ou%K*B~GJw3%-3qE0yKo^3KBh)@lH(=6p6wSlL0VQt`C1CI88k#5Ev2_m)ZFr{ z?Yx(1Z~#oYn+U?fq>>LhAypAacU!0R43Q@3G)LF~KgS2CF*)qMqqN+ui>902p#3!M z0cAE<`WA{4xhu*FQs0U;n%#Uu&P_>& zbVSrzixdYXbIHPjM<~>*@3FWXcW_89&Xgv1X%umdd^k@;-dru4fn8l**BgM{8?vyw zc_Hjx8O?3!T8*#Zjh`wGs6wob&)%BFr_T!Q>a%i7wFPcg;v|`JSEoK7sh!idwuXo0 zYv}XEuIi6V^43i0dUjtz(Kj(Gv-CP9?T+u)#B<|t?#pY|WPJD5EZ+^NIiIc}TD-Qn z?%eui)-Fq|Sb#kc-7^-HE82gTq`OkH8#cV%$MHt~uw$QwP9~7t)~L5LE`x|CWZU;f zK=j5eh-#Muc`a_~y0tUh#hd)o()H48sQNtfTmc;OTu~pf26)_ym|6HgznAc{aK=qi zL?MemshY^43WPMUX6lnGz)N$nn8jfrMGf2iSX%T1MJ$iLznd{~WJYG(-aspTK4q+sqL$V&yTTK*~T}d zSgIAICI6yAk_ZDgdF(mXl zR4zz7FAIrEEhMyTAP}dzBrH48u*It_(Iw%2GWfOlH{06x`1>t1+F-Bu?@DsiO37-x z-VOCi_48_$YNY%^6Ih7_66S9sLqR%6Yk6ZhHsKNh@$7C4+sP#3vsYcWQC)y44*ar> z!UOSN?eSmk3zKxi9EL9wXt6}5_vEh1y-%m(2lH3u#`FvBCCk7r=dqnYKgmb_+(2a| z|K}EpE-f!#sR>lm30xUYq_b05oBTfYiwb|hKll6RtVAsyr@L*qf&7i^oPKf`V;fvS zBmwSLu9|ow#qp#uQRR;9cD_HuShwp29|0=d#qo&nQwW>R@oks4prqczt~=r&iphCKj^Jo$eJC*kQsMl7aHOt!BeaPxQGDaqu|tXLtlc zGNyT#(ulH!v=9%#1wE(wbrw@Y?OzncTi?H1}giuTBd z;kTcyQNiUjFuyAYH~ax0?Kzk0=w1~LE;)@{x2ji^xT=Ii-e(7#X9&gJbWyv0Q~KAb z&8C*bY{K)FnoZ`hc0$BK+M#Sc6z9S349*)r5?qpdGZve2|u=$bxAJIP?-^}p;E>=NCh9k6E)1e=K;> zlAw%zcu324tb~Uvxx@97`cu}!j40lCi6Tzn6 zDWdZcEzwr>Fd{bxHuv=^!V{LC%K20(0vB-|@hmS{4PgA=TpU_%x$TzKnIAj*marv= zsw2%ji_xZ!2T$wI_RdZ1Wi%9q7=Z;eQ928#1kuRHgWGbYDELJ1j23zEqd)va@SC}j z=o6m^`trjckG}qi;9@?OJ{kN0(aP4XP48(XoT^@0ZHFgXYu2~#lfmV=UD3aOGT7Pv 
z&O5l+Y6(zVd??t^TrAxcU4AIo)C|f;&pi}$XV=c9%5*u_rmn+@HXzA!Ja-fIo^BTdta9SDN8@jz zrT-ZGW(TpwgA7w%9KHM>gXb*wSTNj?E^Q+E=065kgF9>QF(i1-J;B!Y!#a#vV1aMC zC%CYevn8!zl{Pkao@~}9I#~5r?+I=QMh{?pvycgJG>ESFRIoL7SM-OU3jQXyIBKb; zgN}8kgPx53sA_|LxN3v`xEgHDj~ayx?#6XQ7? zxLjR>Q-E%AGItCAzn&e0Py(jtC>tya^REhb-SUCI5C>vW7ZH$8FseO0kjeJP@R@7+WW7Re-~yJI9Q z7?3Tg-5So6-g-~4vM^mL9kCH-qnH0vunI*w+I3IR9gY4|&;})s{ZnvmWlq=~4mmae zd@*Qr%{O0R*183kTS?L=`K1nX3yh6s6V_-LT1$vwwuU3#ThJQLTc;<9vV9EH+?Io1 zc3PuOt(`sGDCzBen=5vaG6I0ymuKy&>Y}yx+ERZq82>Gin?Qv zF3X~Tdwpz~P(YwOw2Uio1j~tKv{RH-Pr*`8MR<7{EcJw_mI;>nO=qtRmdI@nO2Q8* zC^eFSa=97;O3sCk=x-2dpPm9G&q_G$ds5P?>*?5np47uMJI#}i=f#r}Q44$aJ%h+!BYFiv<< z{*I>rfrVm_!-x?;_OAdS7&h$6ikqHgV~lcK&F$my`uQ6~NZYWf%_4K=)eGQ1p(hZT zQ%@7GD7#e_9EGy&^D#YZ z03)3|2F!u?Lt`;p#r)0Cm~}X7<3%m^1S>XT2|GfYqYzvQ9NtTTGaG&GGr>7<+8=%< z80eap*Bai)WPcX*eU|I@ejZ)_*WCZFw6-oXSQvS%C#zW)k+I!})H6@6=dx`l2d&leF8C{uQ>g6=KL5K<|^y1GbQ&VN7CJIkpUUTwSA$%<8!W zOHEH{GJ0=}omAKIw>WgXySIOdb-l#8uK2#kPy@%+x3Nt!8kp_V+8s@UE8TLc40*hwF|T6wel;^ZwF>gKTEpR$ zZuUfs{p|OxtnX19GraQh{C;{B@0`*T-g&n@Az6(EagFK;sjd{M>bF_zyaJe4#=Re} z?Op1x>Q~`&WPn)x#Tk)%iCsP0)Y-%a$fITfnPf{s>3? 
zh{LO5bULuAp2?}NQNE=>@0i-KaWi_dafhrkgpr<9Ts^2K8#u1t=Y8Mf^zHEJWW&Da zSK+5$O@pD;LL_Cpfs8a}LsqBgw0Cv1@pD0GEel(1Ja?GaJrcEeBpQj{_&G&q>CIq` zDt<_RO9#>jkifV;3't1ftsu8ya8eDy*`F}s>9%1$#!C@wO(h7RECC)5$LaZVjg ztnrXjM->Czp*7i_97rbCST9xow#5EoFI*L8Y z;mBGy@YI@k1^d@V@Ba5-`SVBCvPhbTW4t1MI=VKMzGG{}{3*!D*ICPSoOku5r0=Ib zg7htT>3eW(c6!!uVy)@U<7+*CuSPqk(48mNI^Fr@Q>Ht=Y6>wsNw>zNIWyH!`k86Z zlWPr%Q~abaf8yBIT9dxhd?~nSgD~vOM4MTgMRCml4P$Mr7ai9|0oGZq7z`afhbI_% zQlBdbwt2k(*yi*E@@t{(6Z&!D91l9S$D!$=bFz@LwE~JcdeoQ`=b&WiZ(x5|Pe4)C zXhZ8afvP9`Xd%c9uS+2_vQCicGRW{VA#)vBbKF~P zK;ip&N>BLjKVI%5uAwILWqW6=Pbcv_((3yYWha=) zN}A(qN`v?HbD0*A?sB)lAn6}=!^HY{ZUlIGvaN@BnvZB)U(8vzhJ<}FJWo{UDbS|L zv4jFL-{;D(N?Ee024%}WaXP_ZzjkE4kAJD}tbaB*onWXxyCbGCG5-F3H;;Wh1>Rwv zu}(0i?^=ly?x88ndz8-zfc`$If8&f}T&zE4;s^WtRh_x5+^Yp^0KGoWw=hDQn&F@) z!yZxBFvv+g(FG3c$$M4(KD6HBG+jd+6^WYlS$!k!duTlqUQde=T7c2E^a=ZSczw#m z_b+ISEwtWNF{+`QPJqg9EOcyr-990KQ3_@lN9!47%p)2TonTT=R#Vk)tYTcB=ltx) z*C!CH@3FOvj(k#$=x~k>=hYAfGQNQ)3}mmKKx2p}x#Wl{{Q+3l!$9`v$H@(Dj|Mly zqa4{#Gw)P4nB0)j4G19Q{tWcTAKpaebCc4?)H#Dp>-jePl~WpF#*Z*nVF;)7-Cr|= zp(G2K;ahm1+K2~kaj`- z<^+p@jWRP>=*ASnhZnS#(sPuCsnT<9Ph{FgV<^*pB9j}P@oyd#*@KzfBIfSqu)u z;5jmoY;Oe?XWLT-sGHT2;MD*$q+akt9o19%l6o56?0T9?dfK-+>B)Db4u&`5{h+gO zlm@^f9NC=8=+VuR(MGc%($ntNpsiOn9!lQr|>mDFsm@{<7X&gmK8%{9SG9`Fr*=}=YN{uKN&o@y|1N|V(RG~0cz z=Ja%|1Rh;D+i5r3sd%FZc-@FPQ?ayz0^cm) zo7)^6{u=ef=Qb;@+EKc<)#S|JdBOmYpS+gzN>1=@bDk?X=LJgziiXZ(ykXu&2O<>{i&vo0YyIN{vD#t>K00Zl3vcmp+?!Grh3f zu4_RpNHxlsUb7hE&k(HdEO(dw1@~_1X&ARTjk!62(gXP|POF$WufN+ynQ5Xp5i4A4 z8qaTo)uzLOnMc$s&%+uBjIHvFRg7tUM{ft|1`T71kEjgi^~uVZVYCjOQ|Be}=e%g< zdqMAS#5(QFdHv|cvotcvbG{qrr20g_&g+Tkazanuo6~y}=ethl=+yJvX!Pqr-};I3 znVEW+b$~lwR`P)SN<9rAqvVCo86e&TSO$!H5KKx zLDMQ8%0srpk=>;q!UgebVKj3bn~fwRp=I~Ax$-$5_=~#~E(P&F4NS#`M%k>7n}9if z?ehL}xN3%RX0Pq^s~+ECV5!7l86)N5kk$zhCiG-Y2YCueb9%ykkLal=fPTk0RMnFx zz!tdgVf{9=)vdp}C0_sDt*6{v%3qX&X8<>3$yRAQx{d^~>EeMzCUJ-qvntR2t!~&6 zJ8r(UX-NvMyMup2(epdh*`5 zexLI_j?<$dl@DY!<&Rs+VH3?}DsivzJfW 
z!Xcih=NFhUJ3@ydo}mk3#b)G!Sg{GB-QVK8yoqb;a#SxnOpcuzV;7hrP>XA63n|1J zR`_-S3bFo{o-@fGrV;B4ZGt|}`MHcU7rzM?^tVTU`w+pd&)pZacFbL1VtoDrDKyKM zz&qibZ*KES%}KX8P+$Rq4Jp)zJr!LcyVK8iCLUNS>hL`EcGV3GFE); z%!V&cXZGM7=t^*6Q{VSpoS7LDTSUtd&1_Z2#eQO=HnC@pJeG;2Q!3H%7_-oRr+U%d z(YEgfD;AGk+>e8N{9-nk=S1|I-$iUr@OGjORc!%F%!jkJY3|~%LkEdKf+H9ANnswm zL}V84La&9GZ5zy49xdm z;-1HN3jEVNLHtYbqazw6IiZ*MlP=#L(8S7X(Uu@}HJ*#RRF78}AaD!$Ce8;laj8zh zBS5u`f&3Nx(~;ll6sBtQXS2b&XG2Sqm-5P#y(*XBFsWJhPTHsKDNDX zcPdihoh>iDs%SVoy>4qHf^;Q*tv zYGyR>ECWN*{7Zgti>K(h?OBQzYsS}F+3sk0c01OM7Nf-Tk9Fy{r%A;PC+(X%+4q6` zuY)#85Eao?wsH~{?;C_T1N6XtN4$uEa$saUzuo>YCvZ5-Tj>k(uvWG=k3h1{Amfj4 z_e+e2xR5Ka!&CL|fb$4H&WBvDu&`xD)Ub;d!Zbk#mL5`HJP)dKSkE}mn8GLZ9X%eP zmkmNZKEsx-5O)iw7)=7cdPPj+N3Spxi~jcmLHl)6SM-A@)3gv`3}5L+Ij$aIzq5K0 zmAPL}-kZ^TV^_K!XXw#sWOqmR{xIkp7`u{rsfP*PK#tpLr|sYID^qGdq_@{%3Qb&T zGpwpVN485mGEZK4+WkO*9Ngj7TxHDT8WW11^|PPhsaa3y^T-Z2+o>z#+3w$=&g)H~ zWVbP!w?o#oL_KE1U6$W_1)gmF4o7!D*Gsg|BRgyrmf!#^U`%hll|i zI<0AZhoRe$L1{(lr>!FNtxd1FLc9XK<_Z&ORV~61{+f7;cBD|ORCkc_rzrxnqk0ao zdOg{NBbu&YdlgTJeOBK?>@)fvCU9I&mkW4~%LSa)8?E7y9nk&#`gQCo5AhxG9^QRb z7C`5D)7|Z1O7~*`MSf%2z7c9MOO#Rns84EhrHjnKFa}m|yM77Dx zh|0K{X6%SV3)+FdyG=E&+N(5{ZoJIv#*@3V%9IuWo7-(X30Mq3(FB{s07Y4Mg?U$i12ne@yY6r@Q^p0}m>AOmBvXAqJ-YE|3JECra?BMw*S= zj`D|Zdpyi?Pfbu=%+kf2n#`+7jz8wqB-V)ML%9*DpVQ`XeU=|9-|bM$rmgl{_odi) zI4Ds7MsC-t5{yOXCtYaK8FJpss3J$Y|R?+x$tu%ti7Lig>o{eXhYk48`XNw8vxQ~eWr{=n7Rr!}it*VSfSkEyT3m>;{^8T0+ozyBmy^elp# z&cTTuZU}hsO{KTB5u7R0sPxI=D(MqtUGr$h{)|?DDtJ&&&3>Fe!g)X2@vGz6o^+Gg zjOu>1X`X}EKrHnp{@^uc;tyTpO#J@nsV9O(=MM2|V&@O@L1Qnl^Y=y7pORiS5`FWh z!K%XWH7U((|+t4I<0CXo?|>Sru}3z`cQDrZwbeF z%W`1!WZC0-3V_#Kp1)f?IH>?vH1q3_a@kM|59y~V-??~RyC>;s^}O~85Z*P0auPbb zcvnJ3Z6tg&L>b|uXKQ%(H4-uUZETmD?94UsCf~hF-i5VSz!hfbMH3s_#iaE&h;&d- zn8LV5nD?#6cg3ww?lOAnr`0^I2E{^$#Hhv726w+C;Q~WZ@!??0i!9EL;}ZPigaPkN z1_J5ZA3!K|mEOhh9Kj>G8$|L6-STp3Qj_f5lnl0MYfuT1~IDv){9TJKt-*}b*nXz*}!ATeEmGr z_{$tWl|EEy?k}a0H#K)Qvn;6`d;o)EMvI#%qL8xtP+!q_(j1+;4OVHa 
zPOp&8FBpI%f}$E)wLUMU(ZjryUR;lXh~&j$yz;c&y;#ZBj!RHjs$JY+jPEq(1A0Ol zqk7^L+|M(8wMDOv)O?lKSNrrf(sEco!9Qm7gnt~;lU^tEJIFV#CrGsTrWe2p^rNmE zvDtHLx|1o~Gty$@YH>DUgp$R_HFt3Zcn3AFO;GO9lh1=Z!)nQOG{_ML*U{lp^t_L7 zRZBM|PjB^I;R5{uH2e zDE%#b0h%>zSn0aaT283t@2cf^(z407JlL>hmyTN!8dJk(so_}Mu=G|wl5fQu0*I@yH%uDwMiVR&b%$7hGWe)m}{V7Ll9vyc{koD@MnBIkV`_0`6q2rc1QHU6MI>_ixQ0wPz9h7v(B{}8@pd(uO0x*rv$Fkj=Z_NA4nGAt{FMGT@ zy7_3KGwR=gEQwdIlhf%|`(v@T0|JnnV4L5e_|kCgOHJVk_NAuqg!V;8p|MTss)lGu zbUg4|96buT0MFI4&n!|4(B}E2Cg&>SvNiGfiMq1Gz!x_8buB77yir=<$J=1nF+MO$I z;8?KgF*r22;p<(?qAktEV#TelX0ISCzgH~L_G(3T2jcV9hDa*%{i`4n#dJ@JNmgFf zP{}Svcnpa2jD?6~%x*MhcHs-Qlty@odN7UoonFVV5Wo9PbvBZ;Jl{9m-?*X36~jdh zpC0XRDXw!x4K~Z~uWCf)mG&ox7AT>L5$rH>eQ8*1PAKK7(y+UrA3Y5j^V+M*G%zF2 z(uC2r{USRZcBd#C?I~On7eE&8A-rC?v%pTXN?*z+gsSI6#ne}_N(fpcLeyRB;^^2) zJs|GXooCY;Wk%j(l{3=eY?XR|he_nwdFe&s}TeWK8BBZ$+&*%-5gSeyUD{>O8+a8f>EwSzM(a;OcOlX0TN8DwAs3z@eqM zIaJ-(F&>rc;*KZPk+ZV%Tbr6}00}@tX2V{zC)Kv#jGnH9an;^b(p|97S&_xh)Sa5- z233p*bY5R70skT@bzYxhyHw;7r?zqos048#$0_ZWqT>xmd;33PA#uYC?OA(IAG1W~ z?Li%IWsTWEPEk=$%QCugE4;0}*tJtPt0<#7$I@CTM60_uQNk#(t6~~?71OY~GVK09 zp5kUW<;nsDv6p4heeK0=$(x7Ui{)jU6!Y~Qb2@3IEsC~u6w7^8ZDEzoi*JK$mI{#o zw6s5ZSx0g0l1>Cq5rNaLyhot{52PG@mtWl~p=f;@gD zTbn++O!WskFv8i>Tf)}sR`X{q1}wd9b!Q*f`}B44f6k1A)h5@w6T~)4fXtcm+f+z} z#5fUVsH5@W!`jBprD+7C3RH7Zs-TX}C{Q>x#M%QX-<0@T()wdRFd(?rx`jPJMF8hH zw#IQ$&rv`L<=;M@1?VXZxy}B1!mTPkqkJzi4&6;Hzkzo7sLDIQCX9)2gfq)T#<@9_ zYL#h$O2L2>w=7s0z2eyl$z~~0*9l`nTdJ`GD(A#Yf1}baW*$hoS*2$VaK0H+@bTqk z0_m>nZq=S|Z90lna$ndF0BqMS_FIB!rFW<^)f8d2u(B5kHn7-+yewJZBcBCI=cl~P zBs;te%PQtA@h1Dbrc&+$HGvG`9>Musw8chgfh4OHn{)Xlt5`E7Tl4 zw7fn}j*9z_F;u2LzI%3=Gk(XSn|F8jR%CfedpVX=3d)eVNXJ)`%(4pyNp2Y>!J&b0;AtxHHA2mC^_)ph}~2QW_u7>co znyjpXdQK@@|Gy7!JesUs8+{h=ZWSoKhKXWSSB!WeykjMmloM%wVjE87Tv2{etOzdz zzZ1ZZ1^k~AY+Ez1Wv+F_h#Q0dkr@1so;CO@k9hDu+5r4=iNJi71Piw^mx$?S(y6_n z;*=CHh5d9k-;AmEgfWm%;#Mb_)5$Eidb)NJd2cnzqeosE6zqojN@9hXf5(jL;l#LZ zD!rTS-KShW;lxg_4)^Km*hB8DUAsSY?o~)LGNz0rca!=!yvK+B6 
zHRO}jaK72GRJmx~kSCkXxtw9m=A;w1hTk{S@QFlJ;Aeg$aLyuo(J;t-749cAgWuDrp-mXUWtyekn55C*>+JP=*v^Hu zJbwn({Va=hY;^_@hGM^wDIDBep+<;FFsn?*)|*y_M3aGO25HtFKSF_0b3r&uOxuZr zCAkHb@)#ZXPSjFX5(D#Zd$RS@m_~G5lw4Kk*eC5>rN5&nzZ`XZ5#6w|xGuLUx@9E> zm$<3e@~a&cYGV;G)Wp+aSkD8^S~8D=>^#!9iHJBWLKcp;nTe{jK3g@ul(!{fJ%PT! z^xHBMo@Pe8s#s9FLx-^Yx{8g$ud~VR!bJ9`B>cOQy0BJB!>4=0^0v9_(;dA$uNfoV0^ts=9@V!+>4dNnE(wvH@e zVFNh8zYZtWDRE}TvHJns3l8txYwiW2XuM#WPCKztCN<98%7jUkdsx8K#0+gi{}aLx zUaH6~rudTGdQ3zsc6OVaOkmdU&W3SV>|A74{hU1rb^Eqd4{lwaUZxumTzo*(Y&JXp z3X9^I&&qC=>meLP+oI695x!@xquuqegO@tGg!aNIs+DKzhO0?&^Di)5hV0c5x_su8TvB8_@%;rR0knb(UWMdYRAr zmd6KA&?eSOm8WF2;($<}QN8IesukXUqfxy+i+20bnguyNn?>zt$uVG2ie2R8J>y*M zrCoC{i=zk!gr-n5c;Br^0+fB{+G1aBI{NV1;-;s?UN<$IwS`R8-q`4zFE%0S_C|y*UlHr_Tn6}_d0&$L@BIPCDK7NpJo^Gx*uKErVPnyQPY+ga6L*s% z5`IT$^Fma5cf%uo2%{d=rjqZ`KG5j7za4bLhNAmk8I+f$+vsndoLU~;`JCV!-2KK3 zJl!~f1WZ9#L^t*oyDPKiQ=P4)l^M~Bra(t~B&wJS za>{ii#$~c9?kL>Q_a-(t+@L&@4d$m28{#^z(tA5e{<2V?pM*oEo#Rueu@moBKh_VO+^6W`0%JDp>aii$f z2=q6h`@AWu`!r&No$NqM*Y(kzU+r4!BZ1UFcH1X{_Pjn*4?K<_fdz3zXX1=b-;^xi zk~ny8=~Eo`HP#H@0bB0w#4RSzp=mbq<+AOuE<@-dy(#GNrfEp6p!<9{$019*hR2Ib zppf*-3*vu6rV&{zN;?99+QcM?G&qDs9qe4`1Hh*ff4twNbUrdq*aJ4(#6;&eo6B$< zC8|Ct6J72t#1bD8yYt?zH7|hw#yfytz;?h-jpen}FWJ;Ln-jlsvO)84+}A;^em>XP ze4;08NJ6yr6S!WhOGjj#1iJqI^u0blUS+z-C)DV_=^_Pu+wF|0;p2V6ig@YNt!{PY z-~}=PzdZwd*4kG3U=8qrPdaOQ!Gq~RNJQ@`V1kF+S+4AH3s?5*rvsl9%6JWpz*jrz z+O2Ls(>DLw!pS)N(%O-C{1d(E9fhTFm~(3ryRa?Csn|BBQ5^QcO<}LvmIaJc zaL*9j4Hr(~@tt9FUu$&3lL{-=*`Vh)H}#F;qjlErqHq&(LryhF?ASnYW1DHAr3a$+ z&BgO7qxsUimtU0|?8Q)U-uUup{-am0H>qQ0G=C+^NtovaQ)5Ye5fuUtR(cE@)NEh zcrKpFDrWQ>foaP3In6_&0VE<%%n^;JHhf~h?L?1h^xHgdn?zVI69IZ&0 z*dDqX_O`6OD0Ue%af~C2Ca{Ud^S@@{{2lTNQ^UQ@6nQs=zU=gfoMGgX=0`_$P6RMmx6_ z*RGb482&4vrF=fuM3SSxVhSo3wA{9}xV5rd?5|ywU7Uet_B(bT``8B8@%TPjWBlFK z_w$_M@R~Go(uEmal(E1(p)+^Wv~6<6Oajpz^%a;ZL8R7u!%8#p)AVLjayinQO$$Px z{!a5qD%5KMh|I;NvNW()^zSJG5NGyBa~qOkBWR@-n?#SZn@!ooaPZySXJC4uzuA4ri)DrgqiFmf3c~e5p~{a<4>~2%u*E@!bRBJ#Ez?&&%}7x8G;k|IBcpsld0C; 
zbJ{G}XQmxrR*J)cVPP|w`N!9s`dj6j)5X||Gfgd&VRj-`ruo|&fL)+CGuWkC_&W|e zJXE;@)WBK*m3&-Dll@h}x)7?H09f7j{3-iVF`!CdF{%J-H=8=!F@>f`9t1o!7!8WN zbjA=9UJ`>MPlMufg>moI?WCLH5CaK74Z(=Ax&Vw{sJYjaH|n63kM3RGwX9;ndgF$U zN#K;vl~)3dQFsAG<_elv8ePNw5aFcXZBTz2usn8MEjC(vyZ|BR_H zLM5q?4xaK#+w?thScLN(uK_2P#yFvZk_ldelZwluC6^Y@yL6@YoLnw+!ZS3j zZ8=~U@AjW+oZ6Sqd-Bh-=JwSZP2<(iUc_G`lr0Rg%gf}p^0|_71-o7&6UfFt144+fB;;a={wD(dh3dDhdZ-aJy!(a{fGlVJZd3$gRD@|N>~hN>gKl-qv0=s?OLdcW;1#+CL7`B;%6Ze zGkO-qE@5WZ`1#_RF~VfzmR1CtgphM}aBSZhq}a$&0$8+SWl?!71P-xU2>wH;#_riK zqiAf9;9t5BM2)brhSzM~Ml}R&FaGE2ENDl(pm5h2z~VP^vinRIE3 z(^P2=tLAKITTI@Dpn>|_WPLLK+QK!NWHdwiG9<_%wFt6>>MTxT8dvOh3i@o)5i83Q@jq@K4l`!M9E!EaCF?mL#sGj8;%Xp7+01UgBuS{wj(u?;#gof zh6`zQh9EXGhFCB6s?bbb!9$GwhjCj9Ua|uP(Ie!LNimG!3&EtbUY;}YjAcAcLI$B$ zUFyE8xrA$>w+G9%(e~B0wnZz>?;I1=ma6kxhs#iH zr3W1FhsrkZlO1kOffx-N9HJ%HFj#O04_lm?g*;wep`Dj>o368Oz1SJ1;E4J(S#3YL zgbRUaqrr%8^~~&xt^0TfJ{$nj78#pFb z96%X3Ow!jhb8K<_$H`IEH^N(~I9a#dj2I&%Aa-fn)cG%QVXtS zfidx#1&sEtLR?XCqJ%cqe}1Y4m+r! zH8za2o5BxGj;X+O@v@YKdY<5JIu6Htzo`@)k_M?HYCdy$kjPg;HLpWk`xf_Oiw~ln zY#0Ipr=-{SAp6JiO=?#;oO4xU9sw=0W9+S08*@d3Lc88A$o1z(#HH@$?%u`jr~J(> zO*4ueqx{-%v#m15MeAiCnx@HhNh$LB^5+do>p6@Hoi(a2~JccU35Os zDjvsE!;uE+*rC|S?ntUD4)S_hS9mgo;e&Q*Hp-N&5NObA3>sA+hcvrT}?G33jI+fxIL97r`2$qTyaLQLVpvBq!{_kA|4&GJ&*Ev`UtEDYCIn*XKoUp-M z4&&&ta*K)~s4x;8hV2~w_5s_}l!A)LHmI@4XlVG(fiaf86)@Z2`0e+;)*9Xu|Gh9| zj9)7G#dy~r4fV;&^sSc6k2oIfpH(kannEF@gWtX5bOsO}ppHcKtUJC=M_R`qjt{`^B?V5lZlAT`b-9Cuxjki5oaL(yXw$~SGuN&F4 zV;qlNqaiN|W>ubsY-}?uV`o^ffY-&r#O+>07-6m+BkwX}DoCtoX!=dw->rZ4OG@Nd zqEJnEH~WZ-bwxL!VzVpmX|Dq};ccX2lV}?y!bZCYaX!I2_!wD@x+kLGI{}jl+IRovdRFE;QFi|9 z3KH@OY$wHBwt!*xsX+Jc7Ti9o*ZtH>2C0x%g?t(e~SW>k!k5 zUwoUXP=Z;d6JpRNZRj2<#El~NSP{6%{c{N`#(;qn6oomqg{%3;?22`*)rB9Udi+=B zWHF(~GrJNkcwO5`d;cZh^p#7yMgi=>H{};G^ZlJtZ`u zeA}lqVG=aqcxXa7!FWrWz-|=4(gSvEnqaCtDWTpmO}OY8w&{pSx^y06ZSVe+CM;3K zNhO+ayr2A7ge0X2D^zFnsoZf^LKDt@=QLs4vtpWX_LgV@I0#8_!4dhEqPW_zPC2Es z&}Q>S1x+MA<&@21^dZL}Av-2Pd-8KBHA$MP$d77&Y7go{UcV+SWU!X?Kq};-4p3C@ 
zc7Vna5uCEbc#Q8j(lm+DMkp4;*qp17zqV`-L{fV?CKlF&fU+nSO`!)g%toiFD`+Eo zPfR>24o>{(nxh3+99hVQy-GmoGgic0go{3spLn>;c-iRIOyNk2eukQ!0z6ffGE1fTK?lRAG1u^H2U5X9&W}Lf8#6o9K2Ye}U?!sq)3a`!r-X>t!5^{+co5QjTfO*;-$}TW0gRqzf7->Tn?yuo!TNK77VM8;K zT9pxZEPByzoVA5J+NK@|$qd3b49g}8BnlW(wQoXiW!73OoRq+33$hrq?lm?wp{FSr zFJmTPqwiYFu_25}ibO|IGa2*DSeh^bV(vu>d=M7cf$}Vle}I~O(OWP)&;B(<7;oX^ zeX}i5+5*ykK!?~qWnyeY>V&ZG$3aqT8kB+8NMxmGz}~=RnF}%45pyl#Gb}ft(2_c! zWG8Gwm!9}djAG^wXm9%YV&>;@V$4;>tcj35a|g6H4H&pgB#LtG_K)Ycn%A6^?Us@b znMr%Z*Y1ZQk;oKflQ)(y2v)yT#tQ>(-Y4=iv#TeNASFK``_~KIS)a%kwiQEJ!@5K} z&W9@$QOD49qRC`@(ud3`5rrTA1i3HtCYnt-;Rz3&q}-ezbrfn&OgsiLONMfZ^N@i2 zq}+s@9ZfVzPuJ??&4BWXKoVjIsMZABA}1!_E$;|+*%|R9lLCIq$x}ha&7$hlIUv2` zASI#GQwuihC~ASMGlE)}Dpa*hD;&=dBEflR;jXd0J^JZG7$?JomEh#^&?sB(rG00H zMQ%!p^#El>()EAfZ)eLTc=Hk|KAinq)IF*AQ`Sa`9xIpNYBg3H3&E43W#_s>jWmzZ zO^QknT}FwN$tmk5MXhxjYIck^QdH}{%5Bz$u%%289X(bZoVX~2Ykc-09}Oj>D#MwF zwgjUThb~aZ)ng28ATQ2SNHx0SbhWf)wPB;a%OU*}$7m!$ylBm(z-TN8bZJ9&u^~4v zL3FRpMzlcY`e>!lrcl<6BrWNyl-LTMM5ftTGn#}^VWBsT%WRyX6BbPo1wGpJ1I7Y~ ze=?#CN$!QV6VoeE46jrsD#06GT&1PvVsRBHm=M=d{@7oO!z--84P$T?YDO#_vQI&$ ziIgy=g)L+0MBv5hWeW!zjYmLT(4c_~Z`K?0P2w0+N~_EFXaRB?i+RjQT2^<#IDDcB_ub7lIP2Ci}yj(WW}%M|Jy)@t}p+J|}%b?4z$`I8c8MPHl6E zp;viFErTLo-%;kokI|bXPQjgXCx(0f=4M`R<=oE5YGUFsoiVCvopww&X(?-|R?U;p zTY6;J`FouhdAy;d&bQM^Ax7eK5L9i6jE8K8R8?8{LY;*#n2iD?9X&v)@6Dq+cIQvb zuro&tu*NJsvrVfV8=d6Tn8u@kBt9*y80B<`3(AJrMHG}R3~U6Bc2(iS4edCO=wDuG zUFS2?uuPUmO56s~XrUAF0I(LsQ75o|Al^O*gK5bs#jM#TER`D(EjVg+Gi-M5=buTV z^G13HwER2X0gATN3Ry^_xJ&UNej5oxj0)IVu^dq2dl<_Y2JvJew)G3)mV7q9RqV-7 zZnQAZ1wcUuBRm(9&ShXTMY$hDGB%c`O4zfRq0B}HvQ%LV^ds9M4!Xq!kzW6%%PekHTsiMO;k< z)wdny0b^Slr0BabD;s_bv$9PwVI#C%OJg1-sQRH(Lc>1$g|U=b=Olvaxg!PDHIy7F5qQWtKnq>%xa!bV9zlX+$muOC|Sy3zyS*-{0^Iu`_mWmvpn~b#wonrIqV_y+tM6lant>s z{CQ8xQEp*M$BZ$jbdCIM&=0O15nk%ou`WFG&1Y#or2X#oFQq3%!(g0~gXqoI-pj=n zVv@XyQcS&Ql`#zoPl>kDxB`s>JtcJAq%W_PN_=>9+Xnd_Qkp2e6mj%}{Yn>LDZksyfF^ah^)Q|g1LlV(dL3~e2XUXta%l{_x(nS_ zcwf4JRhYddg-f!9I^;f1^ICV|N$8!FiM&A*<0>@SCHcJ;{#`ddn#6~HYB%pS0OJ*< 
z`W9cr=Fw#(u$Lr!G1zE8eV8}R0_RolJ$4iUNh;_ZZwMCvV*G1$vj&tWs&p5fL{c&Y zHBKHS&D!^5a>ng@0D`9k7;1)(<0R>w7>L}p1(0=I3xILlhZpI$wgi2h@|HxFWTEGT7+lVV+z&j5uK zqJ{=Z!gcnI5wNY1cYD}Scvfj8XwrBmP2Qi<{`4Dx&tzY;lKwC#dc0#7ay(jfVFJ~{ zLEGv0U}H7g#?VJ0DJG}Ii=y1ta+ig)ZPT%RE=ID=DYqRO1E~vab@W_C^lY3&tQbB( z1;#&yKP-Z^sxl?H!%nq^(^jg&0js!n?20--5K2{XiYl&5D^|u8@3e|i+oZC*oiifT zT(_v2lU1`Xt(hfgEqW>*cs%aqV|VYZzI|$>U5)gnjb!5Pm->o_jqZNBb>FL+lT@=O ztyvM*ymss^r&+}w_uAL;odl6p!j7AKjM_sYJyG_?7bCrAqls?kQ}PFHe+YT~eVU7g z8QD6bS{C25IJAWARkD1c9XXLOyj*y8C5+H{Uu@{uH8>gF09ddmN&mu1*$(UGZ(mW- zskl61@^pa&Q5?~~Do#E?(E^7kOfT>LaZ0{zwq?140r#Gt6Xtg`O93SHFT6fd6OmIi z-dK@C-P`Pq3V^a$@Kl5EH$;ws#-ZR^v$rcP z6qWV^m7W+;qa-GCj*P91Emcw+N()aWuT=Ss)F9=z&Ic*|#JS3`#!K=g2?46Wl6n5f zoESY9frL=>T&#%*lzKan;3gu_W2((O?;iNsRG(WndVpW{^c&K`2%(B)W4-X6B2`nr z8KXWULU*wbnLsx(U4w`lYqe1)l|Pxv8!O^gZX zEy9_+MkTMDi^^U0C&7s8;MHNCa)+kvfSJ^p)G^LXB1d>WOVKVJEhfjB>7<>g*;qen zZpa*0-SIQY0;s=zp~dv2vOlRcg5>oh>2hPUbLVn z&st?nz!ZnbIL^E?r|_hEIDvOTf3}VjK)vBx!f^Qwi_jsK+p;@=`2!}bZpssiY_l>$ z2eK&1g1ww!i-;gf(ea>-UHy^#(%k@MlM9Gy4)BMJy}G3yS4iPuT*RKEN^V90q>wy$ zlR^qG3w1Ve@JXiScTfWC25X?`EiyIUBIDm#BRH|D2}%#EbpQDI$`la|5}0lubb*MP zW5`(2H7n%2a)q$=5!cD^w<}CNE<}Ph;hm1{(sL6sOPghc{y<)bg;k z+&3<5InKvRdf9Sc4p~(tQ;Yi$`B6=>VZzTW^n6V*+s!|sZX7Gj(?G^+o);o|C*G3_ z5x5tVmeMp!i@gVi^tNXFEU|yWA#oZ?#cGncSHGkR;+1V4(Fk@fc}Q-OBcoL-8y)hl zjgH@LF7t?ox9-1C^rm)ZdMg|1C*0MrXU_}+o9vU(tZZ0 ztfRC9u_x2U^ja*Vn~(^3GP#W(&X23strIy{dq#en!-XNjl#RlaO#m=c(6XXSLcZ3L z`n;cSj0XZ*Jdntz9oaPA_Sr^q8gvl;qW}0jJl=ipjQq4B)^%E{kG-Q7G*i1lONeR{ zVILsDjSK&}n^CZ`W;m&E54f&}W#-haDg@y#K;qdIOPc2?n*_J{d3y-7m*_B!v6phZ z57{eKNOEyDMRTnt`I~8ij0K13FU^XmY_EclHRC~g;C9z$p~q}I20M0AgUg#cKCa)s z=8oAu#Zosd)P{E7Hdm8zdlYT>cf&OuRVL$+7(^JVwFi#9%1%NEY}|y^RK0McTBt%9UdVZ?B_DhQK9q$t;U>{$h+)8)CG2 zCPpi?ShJ0e0oL+F4>!vU>$b%b4R;P;^4 zhn3ohGtTDT%V9eze#GA5238Qg43UT;RSw~F=7^al>o!8xEoR5@(l33aQf-<5H!$ppf`_IzqCS8(OqnwtKX(jG+ zDDAVithL|Mn%e6n9W1&4fMD%f=Ao#sY;Z9Y{40Y3u=FfLT%%HyDpTY>1?E_Q7$lB; z9eKV;Tf1JS2k{m9ZP+bqSCC*|#hmv)W(>3;%K3s~>c>2aF_7{q)(No%|4qh#42oT1 
z4EWU%EIuIGO2TVYjNJ_(8M*K@sF|SK1#6J<4~oAy8ZBPJH|XPNzF{-2foZ-q*Fd3} z#Sv`7*j$4R-M9xkM>Y5xpXBu4DiLPcC5hW449h%sdk#FIzYj^SEUtrxRYc-=r!kQ?Qxda0>&I>Rh9`WrRvY z#))KvrKL2y)ZEI(dYkICCn!J(41TSi|H5BL2LMg=myw#0lxFSDA~kF+h$^H;g<%mF z1xj8eg|gScu?tND$0;cm+g07<2#-GU z3Ll5-oDBqavd|L~L17}yq}PTvJ~JO>>c5Kno*Z9}(<8ps;I??Ye4{PSR;^N}Otz7| zYLVPeVi^Y&ZVgOHUdQf??2_@`=p+*)oy1Ldt7&nA#>6ZfFY$c_m!{-8xVDtj+&Jh= zVYWZ`7F)zgwxDxdhFWEv+Q|mH@5zOo!Y5gzIhiE^O=b$XGN0Trk)OJgJEm$-Fu!bv zo{HU7cMCRAaEKf~2a|J& zpCeAxkkyWe;K~@1E!ac#y0&m!!-<8)@O)$W?i}4Dx+S&@dfGf$8rX|T0#>LEp zT??F!q0k_!mdFg1D05{EXkCwS)@|y}Z&#Q9D#%yc(crebAU`GG6eTUvi5+#8%x#?e z$OZX#74rpBbR6(iA*gISVN`o^RK0(aQ3Fg{B1H$GRrE%x#z84eykS(xH zh0Q<23j4U2Ad?x6G7vcEfp7DiT8dcX>2XvUMUfmoiE$J+8bgV@)n2`$dxTj<6rUxco-0pZcKDu znaDK&I*p1E!ej!W$%ToN)x^ee;Z^m?u?Fn@8XwREx<^I2e4X86X&{ zod!`)5eKN}ZgwtX1Q>5I3`RbuCgf98&Ss1>>&fTZiJcQ6pA+5JF3xux+)STU{68qv zD}!EkZg#GT`t@EW=!X+8t8PSc8fq}3lk)zqDojb}^F44^5Q!ED+-;6X;RiqI#Hq0%c5)o;{a#Es<%g29+R)p5XV06-mAkF=B z<~!O1^Xt`#F*U@!QWP0dhj)OeA(bd&)DXBsRw|&|&F&YMhY!=OfhH$c+ z0N>E!c5|qL%wI;BA|fV5q4t(gL3|4cL2F)IXehLb+J)$9b+?+{!nAr~hZjwgCw6_U zp4f?j68j*r(sp%Tv-H9Zd;ZmQOk1bhI@s-1<6ylR@39lILq zu09iOwl;>gv?o#0Ry`rT0}Y@$@r?3f_w~#2)3+JZ+}B?*!f14<>v=9WVX9;UD5PiA zJeH?Q<%RA~m*opZQpqGS_it|TzT&@53%RepWHz$1AG1+i!eV12hV#QnH;v zJ7w`Uetf^G-MQthMn6*u?z>YP>fP+WH|E?Gm*?}vl>FBzXvf@Z7FYT`s|1jb07>}N zvE6G_8dg<)IhH!JJFQp|SG;KKiW9A3Hx;vu<40VSWH^l$#PFDlLJdSbYYn`277La5 z&XcpQN{5Zf$sl$#I4Cm0c_PGu+=qWL=7ltMzMR(E$RIY!nz8Q5D%$;Z9(GtQ69*9; zJZ0B@vU`NG)}8MCtOB#6!HVcn3l8CD|H*)4f*4R?8GA6jJTeyJMeZfl;?frFGI3@97j%?Z?Er`$%cQZ$0O2*)r&~=yXUVkFVqWm zXl!ZHsV=p=XmzvlU$w%@l~l3v+f%4Qbo>2Kv${0t6)M>@!l~Qxlxz~af}O_V?D~!3 zCzmv?jpG+g$#0-nBB7LsFK!kQ?@yAMyQg=Z5a{sfYIcuanctQ(743k|bXvDi$C74G z!-Ld8XdX2jMYC7qMPD>~7L0|y)fKKs9k^EXM*YOm5StsnHrmmIfg#>cj6rd%tiNHB zCli#3ZC+?82EG1LT8a{VkgPl`kn)?xB~CAzio&0M=m+U7aLbQRq6)s-P7WtZ-&iHDJ4@q_vvqj(l?Yy-53UL7`=-#&_(UQI)CN51SZ2K z;exP93o9PSZ3G44=rYl$!OOT5)v6M{?Ku|Lf>Bh&AeeFJ`5d*Aqa&kN{$_In>KOuYBk>VV_= 
zJQ1Y+37&kw$Ldtl?q5@bABV9W{`vQnav@Rso7;=0k614|>tag|dm`nd?ZRw$;Sq@=OkI&MY-5enjyjF> zn$^I6LK$MmrYlCB9(82BX0oyjm!0Fct74k!0ZEzVjV~8d-PldBtfpDVbxN*Q&F#y& zM4=RR$fp5THSD;sWNV2z`pK~LZAc1FcY!f~% z(wc!{CZG<$#QL+Y&p0P3*anSrLT3|mPUvjxrcCi)#yOQ=2@Y$7WHj1sh;BFPNR?>y z&Uhti7_Y={$}3GZG?L!k(5hIXf&Y(qrSh|{j{JG05vIJ-51xQBPAyx_I8D)8oqfHk zH`3w;uXK4r7?;F^5fW3t9~o9qGE29QpH zjv~i?aelP@iHI3xJk*l6eL7yA@KCiS9x6&wYte^NYYESmi2n!A6o-}Sq@;$>GNRpz z@of3{gr_2VS*`Ax;Xg`%YXR`tmh?8N?vX(1yYnh zS+>YYc{!@YN%?0uDO;a7$tUfjts{$)SgD$nl@imWWT4RqP71n#itlY9+*|NYqhezk z$i7zb@7-zn7QB;BsEs;^IgPnhY>2#QgIA5$Qsg{nc<7_WW zS0?SAR}xl*moqNZgNCRvt
?lFvcUpVi5zSa*lJa!CIPVQ5g>{mws@3~q?t%kkxh!P973H#zuX97gvhXM7Vz5XO5ZW4JSwR3L zD~OHNmgra3Xj4nz)5};CEZfN~YgmWt3JnL?c>U35vnbSkER4lD|OR;1Y!|t;bU}3fytEI&l%gsNc#}>?3_&9b8H{cPmTgS2w+IERTQx|L-!}UCpV>TNH zNTkO>T9&vF4iAf=H6x~~Z5>}lP1A-_i&g~bjf6bq+`cvxv>xzWQy^~SK=Z^+GA2g>6C%)hNrP(OBZ&j zRb>z~Wre3urag-h-g}9bRt+-l7fm6VB}B4?Xksh4bMMMe1TU_=i&$V&LIE2a!n;T0 z*Q$y#o8H1-5!*!F=eUWl3P0mEBf=Zb|_H_rGZf(rV$LecPTv}ee2A#ph~!77gZt>M!gB6xG9fcmxX$W zS&mI&7In};4)&6XtUBT*x?w5c*`tcP)7X=UhwNPaM<;zWTjv9>>{OMJ(*sy%1UGJI zSvI{;1k2vdE-@@4%aJ^VqQxW13g?3Oc*o7wipZ^&vq9kO5CDiIr1jG9&2f<7*QAXTTuxPXulMam_7$?`VuNJqFL zDMmVK`2Q@plbS+eV1fO~M7tc%v8{Hu*Rm5zFYe|%lwi!p2nr6Yck!%|b{ z$d}OA=WTR;D3{v}U*D%kf6^xl$>lSPKW?u1j)j}<7(~%@pA#+0T4HJ&(86$^tXtPdHyRq zl0Hsw;|KCHk15ZGS%-)ykzeHAM1J-(CE1rFb7F#jBTWhZIFN2WT07DQ&=N;XK0*AF zSUOr`Q^JnC4HumeOmKf0P|AUYW{aRk9QmN-JBPEIxW{&JMzQWMA3z_79rErJK& z75oF0K7C|UD+JB`=4(xbGDUup2YiCN;FC>L-ea*tl2P&9j!&jRt+!4VqjBp4eB4P}sDy*x|^jVz5u4;GH!2g*e#_k8FBly`oK@`6y7 zWR7hlb(`WCjafsSHD6bd%ES=@0WfU|sbEH9q#_c4ZPZv*)byJmz@D6^6vq=d3!fNd zvB$(X*frb3>l3-dD2xR6vO|YJ+SwHuW^6H5FR6;MAPT31^p-4u5*1PIl<1AqPW`Hq zI&4%a!UJr~;?rI-ZubpQQ}A8zw2MB3LOAiq$yd7AF+IjnFEII^CD0bY@v}UbQ@vX- zCm4+mtw>v4<016ybXpRS;8x=fmwG*db=Zsazp%BCTh_583u{2!w3abg)^P>I7omf9 z_^U3-9&S{3h_>PYX5Lmm;e`T#9T0V1ap5WI2*F zyoF5j*S3X89}pMoYPQA}r)P#gid2;~!RmF7RaoLH5LE<`Ww`rgPAy7a@~cisZqvYP zR>NB?f=?^a8x`TTrCZ=Ca+pv+8LcO-2FNBRaj0J(u6O2HvZ%Y z;jBPIreT&Zg87BAmG!ZE0@x@Y26ne4$IG0m^@w>PF<6Ywv|nmdq1CnUHJ%B=L3D&h zXO`*tOs&8Ul5@3g*zrs2gR&LZqs>S(LHG;}qiz~k9NmbiB%v%L6lO5A#^bR!O<^Yf zRr8Xwri~DeCqAsi-*FgdcH-~Me^9zEEo&0FQKskGA4Wi;=qb6vDIK$H@lvJK7&&xI z`7)e^7K*^y@@!QR4L7+TKboK8CD_(F(hW#-$-Ci4^QUI=?n{HHTJNmy#Qb~mJX)Wj zl%hP70EJLANg=VQQxh%BX{2_c5b!uN8eOlRYoZN}La0RksX^D_i}8sy^!Im|}xHkz#k|Ito@~-2NQIHBlEcH;{DBfSww^USB{}K+?F-g} zBK{-%dVPU|Y=%BBZ0zh*fljgP0APlwdPUSpXPJ&BchckeiAM}Ga07qml+mpZvp8yf z6Gi;Tr1dFkeN(QppaO-s^(nq}wfxNE`ANn8M%H*Ler) zgI2+&2IAh>d^mey*ZN&RS_Xi{sN$PvYFe-px;XGNNPDwdqB)qEBU?LF&L zS73eA+(w$C(Okl6g4i=FSLTYid(&jp%V!% 
zR1T@j3wk1MR6vLwqA^W*P>uGajSliJBsAW!DdR@3Rij1nKB(26@>ZMFYLkhN2Kzo$ zTuBp}NFTq5up*qALDO68FG!#b>_9&*ZBlmlQU2+$f_rU!zI9s!JZ9OI>1+c35NH(^ zVtcSBBdBevP@FeO(sp<IqWq!XZ``7)y z%-@!I)P3)%{Pvk~?)sa#05k7$Fmo{G9(C_)b${(EhiF@W(`JMMgn4{%&)#cFRtgEqjVItkNz5{%n-I1RP;LHNkjkk5}r6UpocZmBa>o$96lIM{1X| zw5e50ok5Wa>irHF%?I+o*q1?lfhb20AR|G$6p*1Kz)&;|YtRu%*H)HUd7|tsq(E_~ zsk4qFmuiDeIxWBz7yymTYLy0z*~iC!-I~&iGg{7D^p9k$xea z#_v=oy~_$wh7!zptUBogUsfGv`6X+10)jM4ys^n2BpaGg{nWcP6B?TjYeXVbz8Glq zGpGinOdHa#v=W2T6>O;H{N{JC+SK4c;Kj}GNR4l(?t}<$bc>$NcYN^aoaq>b)xdD| zdSzslX*NL^j%*&9MP{fEVuXCMEV7BK5=AH-@gYiMtNZt)oSM;ze zk`a~ZQi@REvH@u0R;43YKGq0U+6WLbZ3JsI0#OuRmydvkMhrn*{s17MB&LjpFj1J$ z|DZJ(PaoRNqq16r=u4R5Go(p};TB%jRD+ACOE&m+47f=YY3EY4gW-f@u&Wk`!pyLtJQK5!bWTQ=%Eb(p8x> zjywSW5RqxRkdyysRUv=SWc04Y{h9!=<(3XrA$0ar}r3NqG1DT%M&WTFvR9l!R`iX&zwbW!l4|L#^SX$dzXEU*I zLo3i{1qAF3904`Bs`u8~tyQu(QDyn&RaW{c{%$K^(N`Ni^2|ACxWnJt3iWSRMNnQ@ z%Yu^zE*hjdt6_iDXM@b^^t9fp6&b!C9l-*E^dJ&-ddkO|iY%u*K!K|OvT%I~0F~YX z-YA9+_^<`|ZHP;38UeqJwVmO-Z4H273cz8TYy!a7)2X?k-{hwrxE6~@45&BKPift* zIy?VTKy4ZYsD*XOa=7>YHos%0&VBi}`PMz+NlN)2!$LSGMc<&%0$Q<@Ur{jbyJJVg zGU~U*-TB-6e$5-9V-O>{PK7cz?uGoJt$#6Q0x}r06)67rm;1&G`Ht2mpZ3%9uTuN~ zdcG}tfivy8dNCQ;uMBgn5wga_>KEx)#0lzfSVAT7v=^ze7*%VeTm!RMC3=+JgWv9Y zQ;4wg*VcCi)_5S$8JS|?dr&y_)@wubQ6n2|O`Trq^mx47td54T*1ANDR7VWuPCWyt zmHdLJ^)}RjcmONY0j%4sF%=(R0E6{KFWZll*%@t4n_K&O8N}mzsBf%Ktk=Y8cTK2xBUpj>RtX`}~Ba zg<3l*nhM8t{ysmm*sWKK)oQu6hNhM@lv7SxV)f(EGT+74Qaa*mH!BPdG@!s>*{NhG z!|z1hnoHY#=>(?;B-Z#!jlq|XDc+s#_rK3~7FVkF+!JH9Q|x38qr2IZ-RtQ z`GP8zt-?#AJBk;R2Q$0jK<28)ztJx0MM> zN-RTTwYs*!i+KGFoei;QXG(*^nNUM~kkyUe26253HF_Mh91mRH6t}A6Jn=KM zt-CQ9ec9k&8UZpJ{BSxBzniokqelOFar0`M(rWSWhgFFtlH-Ibri9U`H`%DoR#=zp zMqSOK6*gH(8OEY6$`WR0@)cGz2LR(AcreYb8^|t7!QB>?sq& z;rDHSB81!iOhl3{``=hvsoa6gp=w#7VHx4Im1pc%IE(o&SipSL-Ehbg81#;AKW*$YV%?*2dL zgM%-uh5KU{QF}(V(b9~GEsUy86qEM^IB;QYd$rVuBU5r=eyb?7OC`|`%JRQx(<0aL z5*{Iq+Hlrb1}i$ABQ!@_9W*YZU6?brRQP0fFJuazCkq|f&1P5JQw_2+JglMFXvrn8 zGv}FO$J%@+BDZ}?QWP1ftQ0ev5u3R~VG_=A*VJ}qqL29`2P{PiR2flaR2S9cdd;fm 
zY+={k$b{kjB~2fkde&7Uln6z6J5OE!^AazBKG*bee$rIbaY}BM0XBykUtJOHf@5lZ z0d2(-?tL%kzn8hst$7(A!9TcXUshONh5PT9^T%Z_a!0>{BlbnE_)31#t{2sIRO+C} z3RHmQD5gAwY>)}<3+rlM*cr$0WrP3X9;PZS{Na^+k)JQTnx9Q)7r&aH&eN^0=8wz_ zyQbm%Hzt!68IWrZwyP?mji_u;8HeG0?#ki(XDFU|Eq_pE(TWegmLHeRT(;uV|Cv7_ z!|yL%rxkuX{yTqZ_Tc7z!2Rx(4%iaD98@V7uC6RvmP_*?!9*~K@x=l_=fkD7mS-*$MWd+P7`J+s6? zjDI6Pw`S-{y<2hA8~L59GB;hkDEM6F=8Inoj?diUKAs8wGqcBu=2Q&WN-8|-NgTwWBOkJ>w$I11<_L=&X zwTSlB(fViHVGY5I%Kis$aNlhRK9afJJ=PHHnOWs>x#0g~?r ziX7kWcCY!DGaG{gGppTK8?DIo`srWs+eUTn+>ZHR+JrUQpvnv{%h*bmu|B-VeI_4t zX6|)o=7V`z`d^n1cBOna2(}xamd9HpF1=3>OwX)!p9+G{s`R}<@X74@LDv}uM}a=y z4THTh_q*G|;A7dR2Hm)E!G|V4Pz$MpT=fzPA*)qiR^%M5WH{iyKQ8!j<`=GGd@wEZ zpqo2BI3)9cJAZuerOZ9=qOCHA?#fCehF_huCS{t~EtEAX}wvB7T)69Bv68E`Pay6cW^Y#C_IPBo6 zG!UP+B&*6wC0G@8xyRmJ+uHdp{<%;aSUOj_uLzel7OLiCx-*^CP2=^^wM`7RE%s8T zvLrn*QX(<&)GHWGtL)*QQJ=LV4K5p&zuCC0m1=8Tp$fkpnX;KKG79ypfW*?DoQ@4Z z5a>_q)r2=T@`*Hg@+RV6f4+V1GbqJN4Xq!Ni)u0&fGWyW53pK!?0bA2;HxN$?10&@ zud#8QyK!Tc`|gsqL&b;53EOPOqP|1r+Md+K$?_ zN6l#)S4Ak@%aCYgR`d2BJK)_oBROn=YBb=h7W=Wxg3_9QG-xewSn_X%68-9jJ(U2l z>TAN<$}fH+#-<@P8VFwNvk5j08ZD`aE?#zKj7=MoW)kcGo9t6Xbo1I0GR?_s$a-wr zNRO7;X#_TH~EuzTReot2j z8AY@Z5LsOrX3~8x;sVs84cCh}#M^$j(l6AexG!ZzEZ_K>`0kgg-j=F|z=F0eOf&dD zC=(-Ka8*LS*%g<#*QN%|#f4Q6Mu4)QN&}gZ?b4n0K$bcpb)C|q8*LJ$T`BdZCw9I3 zGwP=lOJaXlR?Ux2jYKtS<{PqpU~*?d{4n-ZqbY`S5u0gI_Z6MAH&}HD^#?V+&^9Pg z0-QXL=yqeN+aYy3Y#k2S-;MlDy4h6a9-9{IRqWES)Fs`RPa$H5fPsLUFsf}Os-wc_ zrEO|N$)2FaLN(EC>NTy;LTl{R%Iji!cu_SI;*1c)x|zu0YQvYs6w4MqT24J^wA5;k zTIFzNU_U13+sT+Pk`7VY@(CR!STE&u_+TxdtUVbjRDFQf>sq&Aday%duWbS=@e9{p z2)4nvhN*nw{62l?w+8z7TRO!CYW!HEAzqX3AYM+tjDysgU9q-Ci;nPCqn!~+)y%S3 zE7b`uC#`I%u?=|Gu+@CAMjI78uIbQDkUpLDvHF@!vNEXEftSzpS*V>?sJHyR^t_;U zo|sth&TZQfY*$RTM$`}T$qJa^R*h(DV_{=nyXvc4k%2nh@U~93!6kWMH$fB^H)=}{l^$!7vR3QBvmpSM`T5W2*ed0AVwy<30#RuJkp5!OlNRJ zc5$5na!$AUKt;-F2@8d&(xZ|0GIF}Hzdig-Alc{M=nN*#S}Khq2fTQw1cg>o508h^xt z*S#nec;D;3y>+l}u}@`W5r97Mmv3^AwoR}S&3(T<_gfS3X)BbF@}KAz$_~^PDzQ>^ 
zAQcMf2Doqc&}Df4Bb9|Wn}Q<$|cAubYo#T^YQaBYSo@!_*gR;>5Vw>Q|X}!Xd zUfZK(fat?|#QQ$C&%1)?=#?tdPZ_%9Z#f|Pf!aWI7M-yUkz{GGIx^0Ht5WD`K{Q#Nre~Mro+ZqHl=R&nQepjW{2zC z)!PO;wROLKjxo=L?4c5PKupl;HbU(tv~;aFVMg#!77=XMS;5g&x7Bis(cC4qQ{Bz8 zf(sGh=4~JR2OlrpA=t`&Vs`MI?21~~cTDXhH#|G&^&hTE-+g$Fy?bL$a1m86*fF>< zyXl8+zny}aHQfQC>UwwbPQeE<7q~li3iipI<^Hx)uy5vUx7WLaZ4tUY^X_1M?(CLa zeFb}HPI#7!c1CJD+kJcI;HVk|@7Wbi?x~%FZO5J7k_#xKSwxNdHHOPwuuJewdOUlV zU^OD?0lNm5Ba6PUYp`SH5;tMDpo6E~b_=HM4c2Dh@g#^u`?B+CgFTOO1BAr4I%ADi z8yP56PlgOt@`Y@|lcRmzsk;U5MbKTf8-me&Zv5^+cjhv8&F;Y-nIqlYuTP&jt>uhD_?{5{03u^zIB?5`AXKPzNj<-eCeS~E{ z>bAMB<^Nz0b0^=|((caLGuSV)&^^2-;CkFO?iI|;4h({MqpHrSJLzZyY}94%s{SO6F>J_uj!DYJXXq>(6GY z-G=5uo11V>^^w^ZYTcL4sh*p?v(epnPW2Q$KYUL0bp5_^4)F)Pzp918JDyve8#i8y z)(}H(JnydB zH<*#E@czsOx6j<5Eql@J?v;-OJGq5(gYlUw+~sqF zNqat7+ga~<^$MhTXyQsyjRu4t`Z_><@ika7gp>wTb8kks(v}@!^ZE?|nhLI$QU? zVBXQs*XH)i5D5onN3|z{*?0s=>Ma&B{U{81@y4K+q!He*sm2fZI^#5lrS&29p(j7C##jX8 zh>E{`?DViB+qh>g%5_vmzxGdio+wQrjlBcJiw5Q#2(ba!AL+^LZMfJQt z&p++r>zsx-#Lx-WUUAIvnKihMl%y0bK&ss7_Fz4kQ4H?v`_Jx%scL#xhK$#(xVaK1fF@=s4b zYfmKU*I@4St+eH$kf(<|j9Pukzujd^-XgdcE2BT&V`H1(i@yAb0k=69wNmjx|1>^c z+Z(LpI1k`8z1B>aivmji;}7-?1%p=lKX0+6$mgO4o_wAGQ)F@>Jz} z___a9>5*{S)xJ@ZF$-+hEViPV7;G1~e;gFNM}#i(*|tgUh=YR`?b4k-+t!}lG~muY zIQUo#G_?l07hxCWw<@YPdO7=p!9>^e!Juy9&l93u=?OcB6-*%1jUt82&F&CN?h4)a zpJ5Yq(n3TPe^VoxaaS#b4e5bSI!$$Cm*rH3&$-J#7<@%^vhk3hQ2g7dek7e*KeF1+ zm>+&2?t$~vJhbd3r))edd3w=0j-4_tL|w@tn|_9}tC@79@Lf?w2c{D2XBG3euC6Fl z=GM4AhLT-zt9$&AU>5S+KMx6x&HUOOb7;^~2S>uR;5)u|Xwdoo{W68RWoPYPxg&#| zA+5)b^TXR4Xg7?0g(!J7xDzP)6+huQElI!G=o9_rm=KU+p|r2k;la`z_K{%MGM45~uKOb}Zx1)Q zedqd99iC>-ZHF^iPIGBbqbr1$HR08jrAArHs4HXq8f;4Dr|yd%4caqLy0brut>O`P z{YN?E9%*t>6ijz7eKcr@`IfCc-%@~Y!L$$Z8Q(JQ$Y4(A7WtM?Bd4oBxX&CJ?AXX= zDZRP!NDf_ab={G{*5K-EM+Tpr_GnYWTxG=sW#_`U)rgtKn-SFg>QTW~6T!tAq3#CF z1FbQCQ4oh4-O{6i$>~>atf@1*ih&{gqkHbC1X=&&vL6fPy&E{kjGT}akif!TQkT9n zWJT`e2#JyPW%uom1zQPOulyL>>rwaM$AT+Ha$bLOU;p@M&g)O^rys|5^ssvsZY^fK 
zI^A}k2)5soA;gMv2;~waCETq)TQhuZGY6oyx%Rmhn1;h-$#qCbvu4C*uMD^6@gAxM$pNl`MG$R z`^qPS8STI?9s^=gfy&?Fqjv9Y?xs(&*Di5`pF}o$!fpCwaKMlxH+(dBiY249c<#BnSOWir-Ng} z)&1qu!MoUe(~d!uSmzEpCYS`kjyoop=V{x^;sIx?;Pc184C*w?wg4}EQMkt4{+Zyw zwqKb|e<9Zkpb3muyMmRT_}O6p&R>uHQJTG;Dw*hxc89+(eebqs#xCdf{v3Px$2ILc zw>`V%5+7=NZVMl;{amnB8%mo|qerj`pLWlE4kPGK-1f%>+e}(3_TH8#KDac?if~h{ z`^}BhCrukjK8sLiu}NV{tdx9ih1}K0;dykQd-k~CFH=v)^OKM9As|~b&b5LU67MSa z+UJqvH@KN!2%`GmEr!Bo!{-~_lpnTya@Lb~K|LxcA)XprU>vJHqsyPy(q8`u_-WtOPUp33U-w48@5bU%ki%#J4HOc2Mo)8?`v{4qfteum;X>=!_ z-?C>@PbEgRs0wcAmunWdL|kZh(fnZR;`s~C6fXrQdAeux;UdL2z4C@JgD{a26;?iD z{1;55#E3UM_YM;&-uuUQTS=Hmef!x`D+v=RM*Mf4H-m{3FZjBz2NNmLq)%-kF2r2q zLl@XU;36Np&v-T%NDbuAkJ=Lqq&Uyjp7n!))VEuHXPv=7zIca~f_c;+*7{N~k6E6c zVL6y0%%g^M##zP>!#t|w_I(f9Biy4J{HcF}c~sG1&k$$)c$Yuw&Ob4@QvBJRF9tv4 zcg5XbgyTbce*Q~P@1MKem)V&&xV^rN2zY}#Mn6AuXMGtR$PzdBWe&zG+$&$E#+5Gk z3L2jtx6@aG?)-1rB1oh9Qx{(A9{oztw&$`5qL;8X<=!E`s0pv1AYYb>WrfOR_#iFN zGeooW-0P;F6ztw`wH2#c9sYR5F((CwW-`~hzLSGf80kGhLMI=6g6hyx^X$Z{KzDLl2QJFRO#1jD6;q3;$BQ^wULd#|60ByyOmE5OmaZpYB3B z`%TbZ&P+zZ^! 
zXM)7PcMH!93T3MlHw(iMkJp_%u6_Cxxg&&U!DY(`iKRolIdu@EX7U#=n8_aItr+qUharDq@HoVYf z76wzY7cO(t7Y3isbh+ZfU|Q8_x$q%(^}^ux)(i2Jk^dc9E@O)z0nw5N+(|zOuFGB* zxM$CW*?Q=l>IS#(Il+E_7EAe#JyDwo!hYVbxqqF< zyzjcgO+6o+`iuL-`RKl%a!;He?7Mlb1X{1T?Yo01?N8iKt950q`tSAOYwk1MHln9h zY?+F2FJ;YH0~k7nGlzp9(I z;*Cp!!!j)+ozlYfovx|27Jo8+Fp3@@xJ23+Ct z;HdVS__A}Rsj|b#PL$7nHW-Ag1_K6`xA$<^ z_PgKpaIp2dU9S$l$j^_jrt-b+?^g%^ij5sJpGXW2u?l7j#*U4RZihXBN$&I41a{_d z_Qa>g;5F%~G1;|V8@zh@X?LheKH9R$Y&;y=52%P>-|a;Gi}wwZ<>HB|7T3{rPyi0NS$b>IjYcC});^rLttsVA?X407X2cg2lx62orYjaZjf zxuA~*9&oezg73DkGHzlhgO6QQ(P1LvMjP}B*WVX>ecEfKp->Vp_(~iI;N{{WefXvz zn)<4hc|+YI+2Yt<=`ZTdL{;HxchgP5-aFt>93LW-h=x>K1GCB7coj^_rXt#+GQ7sM z-;4$Oez(ufL2Kqe+(-0ujr;b^VCrq|CpQNNclu%DVbo;V4oFLL9So5;4x9`!(fIIw z*L(|meU%2Fc1NuMSMG7&TEX1!bW2un1pHT{TeTvXoc-@c_tXlG zg1cPH%Ha6cJ4);WJ6pOICiklQ(MquN5w~)sq2AwCa_+8nb8ip64FOtpdvN^Z<#m_{ z_hzR_3Y(m3tk8ao9;^t@ZE@YR+gqpJtq5Z;QUX=h2IYZl-5E}l)$XKK@MwQ;bl0z9 zE_aa3HF!9)#@%s;jraLGf}~m(?hJbP+50YJ$W`u}cLm!`y=RLZ zwE7M@-1Bz@U&#KY(H(d{R(8!A zsK)DV{5?iWj=l#0dX>BW9!TWf?r--5Uu)f&nw&+_ksg=xf|~d zW`b8w-y2L*AAh~q`q+IfRBn~Kc&*{(hP8$XZTH!5K6hWRzxrQ#pY^}#K45y6+xvcN z=eYZWZPm_s_rsj6aWCADm+XBm{{W`O%u1z* zcdk^}wXK8$d&K@@r7p+bPn9lWG_O1bUOZrHp61t6_Q|I?CZDwNXK3YK>pz30)E+PKx{-;C^IJ9hn z?`(Tf)i{KmC#13~+?a!d^>wQd`BHp#IxZnmyR{j0@K#!BFYob?Qfm;_#&a($-m zSh!c=3joI_N~J40i`tU4r9He)^>weT*ZZt39lKXZF`c9fq@(II4aL;n=7cZtycGp# zc*^-6K)Ppb=_v0pE!97(#jR^gdzL+!Ry~Rzn2yyBc@fEOl|ARh(xG05j^m)LTKB)m zwpnHW^&+VAj2-^x(wF&F_!qXsGj^}Pl>VDv!`78nm37V;xC9L!xvq2^y9_jcej z-_B@f$IiD;wSyb;?cg_Y3Yl-Gz6o^Z+gsi&UBhG74@$@JsQHJKY%cn%ApCqge?1gy zzRjPQ-^aRd<3=&xR=!<2*uDJ$>%U!E*}v(5D-9gAATOQ)_zko><8Y$CH}l$WdBC3i zHbTmaw&QJ@{)-*?PU+><>jcYCLKKAIJt2m@WZ!&;1N-IXP4ATUi)QN#cGDm4mtJZ= z{2LB&flfmCLiy5M_kL-MOcxU z4Rj#GJ}te`y={eE^J(eizN^9Lp^?~FxiwqnDghRSg~54QZI;>}{;Tv7x8q{_`oF{n z>*D6#n_#yX)1m(^J+{Z2Bi%kha7JD3)MP$&#X*7%IoCe^@6uD;dBwJFVOfPeU~_39 ziuVbdOOLC*di9k=agoMljyTl8_)o_7BWfc&3risgI^tA zl%9_D?}V+TOWn;&7Z@|z`FO$M<`}1L!5wB-_k#x(yl!r1ehro8D4B?3j?2GV=htp; 
z$T8pN-2&H~UU3cYln2wHHAa&~W})pKHIpbXC2Gdy-TojjtFle`rrJIiHT^r=oS0({ z73ZTIa~Or+&oQUvEtkSQwkup!S1b|S`M!8;D*zKcnc?NOFi-0qooCK;-+0j8p3gKg zWByIP8RcHKtn1t6@dajY2fb!0X1eDTO)VK~7skw<&8uT(qSNC#$wurY@`pyY5MH1z z)|+yCN{z823eBE&N})Mv@b$%sZpDr(t~Rntoy$8LO;6|YRSjp8D=6iNvu{l97`g+zE-aIx@(20g1)gT z-RvN2$QU}*E)wX5y%mD#xWS*3Ybh~N5d=KJy-Rt0+ftNFYt)5~#Z zU1}Frn_=#Ym)mD~Vha4c+8pP8dZ|6OyQwa{v{<pEJ9gecZY^_L1)9Sofh> zwoi{tArupUPYR9gL7}&2+1efy+H$ddqq9)86zU~~@_JHe(Iu9{j*GTyagvn%M3f)6_XHL7flg+WQmc*un2zM4?1hq1~9h(Bbs-kCeWSk$R(Rs5fVp zJ*S3xYcHl=zpQ#dK*Q?N^(ITblcb)Tq~7L>?fyw7a`Pp2VKOt3Q>D;pQfRppdf^hg zH3`}CBjg-@5lXGr1ZeiYt%iT!6k3eS7V_Uq4c`5pF%{^qCdyR+=l z{+UV`s0X^|N~IV3Q|ZFl)(oKKS+nhL2ACdRu*9{hI+&@@xq1M_o}X=Bq}Y%dmom$m ztXYB#Au}7)#1u{YbF}7hJ5h7-rS__wsCkRPXQ!;1J5y6!_tWsH)0*?OW^!k0zHqTU zc4vmRX0~0nv*{iVjj%yFSg6I`kz%tiwlxDOwt0@N9hfQBpv4wxv0DdH?3%guZHf(9 zDKi+9HIDv_LyQ~gaWrbp+qLGe2T}9mOYJR#*s|}~mj;>Pu6^1T4hFpTDZ8IMagX?w zJl{n=lIIss+0}#1PlvQVOwz5gtd4rqQBzh&P1@1D+R+bpp`#ZcwsUu(g^NG756g4W zI{N`n0%ZFRF~c34VR*RLZ?)$S;p_4l_8xi8y~VDV=OfqI%Au70lN~#hVvSqv@8stO zyF`ArJYrvxpNnj4SAM?owjH%Am9F1xPm|~0=i6&}qN99vSA*pKf!%o+JP}xO_%KE` zx5LgEMzQBEv=0q4Gote`{&TZab+{(2I+Nu(lPSZ^K2`NI(SJKxbC01Gp|J<}7+px^ zPluZm*`7!3W_GW-scsvb65}_8Z`#f5(|v_}jApeYNqn@UPx$OUTW5#w&cOcEXit!* zZM0X&^Y!cO&#o_oTa9=h|s|nsWJlk^F9+YaiW{Gw-=s*vYdgi@0;?B>L}V z_HkZmK5j4Lz`-_8-rMvhsNJ47l0)GDd(B8Q=r@0hIyotV)@$9V$z0f3F}Dz1oGU7N z0Bf$w<3J;QLq$@gc+NmMj~KP4SMJY)6h6JgMyEWiB%{s|r+c9cwdYP3zdm(&lNL!q zz0p4ZQCU?AUU)xgVc*D}RDdHE$2jiJIX%-5eGYU7qMan|F<2|rvFg7^kRjP`9L<}9 zqfUA(Qg}8heReU|08m~$?9hvomzN(oW~Sy#Sm>z2Ba+w!jet%fPjBm`ay_5SXX09a z0Qu+-6(|rU8#PZB+8Mi6R1~U5BCa>)uBg1|Hm*0Bo}@ELV8l~X>C0~fP<9sJARb7b z=v0m%`ICH-RhD@)z`_L+Av5B>rm|`SBkzzBnFN&p) zLvi~aDZc0a3$|di*>^CRWq`?kD%z7xRwggt7>Nq!y-Y2KT5IfSqs{fznWlnh$ha5zLxw_tH10)E^ z-Y_~=VNYIV%I&B_Op(pGE~hF_#J$4%ZOb9%hnOl0Cz!s2(n~IDM?@E`ph0Mlg*GzA z9=>|hPMTm&Vf!>sFoT@S?3)wVMVH$zCYXM_Ode{UjlY`{Y^Em1mi)-n*d9MHdk_FJ z@dtqPQv3K1%>D$#I1|mjxMuA)(fIq#khDNoI8!h-0(Q2{S%~bcRggL(hh!R4eTgr6 
zAu@q@7FCyBFH$|V>g_!f&8XBWh?^vYsdqy0Jx`QN;GUu^J`&)bXlMvlrjp>^eVQMP zElkoTumuB~c)P+W=a>ayV~x_76yU@FUfoqVrirNk;=eKWC;hv4n7NgOui0N6W)3V` zolXrB8PjH$9cBif+NN*9;0l=sNkLrx>zo^DK_XK{|2M~(TH;0=RDF>#K|=`UG^8!? zHphQdHH7J}NSsPiL~(h6{lO$NtoU=ilv|KK$M0`1o`m?Z+TJw@=K59p)Fd;Z=dI*9 zRBaoeWwjdP{JT;zXq zzTNAGh>#c9(|%}n6EC;xe+U9@u}}Tb>{E1EzMB^)B-&jYKdiFHE`qOQt&DK` z9Oo73AsOe@O)<2G@&`A3h>ac0JBSDLalfF2kv!s>X$~hT93|MHJ*UedQNbx82rXGA za4(0tk%ZrtA7OT7&x|?3jIX#^3ag$`P_RuL6_(iRjxYo9P+4(=xj_yqGs{l-F}v+S zd)<%C&hDqhcEyj)g?VA&8P`@+*Pfel{603$2Cm@-SumY%Q+fU7d&a=&Pjx;gHd9eA)pPBBCbE|DS z$_(mWPgdNp8TA61G$M+);nyv;|2zt*?Et&S(Pn|$wygQ-qs=vr+x(jS#j$3DTlac% z!?9);hl#H`&g{lyzBrC{n9RWA&B5u(oKRL)ZRZ|uN}0^H$D59j&9`D(4lg&}xJ}ItIowB+`2zB59gZa2`vgzOdx*&j4XaZ|p zU=(eo`EYBn`o;E($!0HT-<~I!GtueXb^>Fqx38UmMrfXW^u>w_JM`x;C}`zR{kb`L z*z(BKl3dpz85Tq(-|sh06dnF6>}vY;&v(Y*Y?AMP%2RwyXWK15H@(@pJx+wcHrNp- z8gXU)$%*Xhwf4LdA-B!;#uI_deFVwkh~2v$j1Xv}xknDyNjd&I1mI&+xW#Tg(F}ln z9&nP`tH&qacDjCQM0|ApXPjhyQ1J9Uux7bbdg>&t^ubBckmqdv$!Ib@vAdpZs$7a5 zd@|e_&$CW8W8seFeV3$xI1KC-31 zJ=t243etIlpnJLoUgHLiP&rpJF$v?@*##`vOGA8Mks9aChCZsh^3PzZ#DUGr~Bm7~&5Q#;B zC{!#8nOD-qY#IR%@RSk8CXvp*Ov4Qhk{~RWxjDrV%P^wKRLU@7d2g3N3h*S8lMK@QwImo}i?VD5g?8eF zrvFJnHBu;L%}szf&}|E~3mbBfif(LDC{Atxja?Ks6$tCZQp0VqB8G<`!?+I=@#RZdJl_!|IPO5L9OaNF= z5N-dRs%PJbu}Rlj?b6s@nHr8nyp<}Bo$-fQm3?GVRe@bI%k)S!=|Yjlnwd+VShX*R z%iKu^>5A<*v_p}@B@LU+awh6_lly+9#^qXr+kRafe&|iE_LcHg2;1xn8SzYcO8AO! z5|R-}Hco9crJ^?7Ek~c+Ic(`o8fryMUnw;t36QK2X@Df_rQ&N_RJM&IvF1atY=0ET zrxb|}d82Mm>^BKo$~7-z!yzFPKG52WDj4V`1;}3|ff4fzlGjpGyrNhv36lU~VaL#D zbE6^!j$&DDX#P+EFmJ&lnI+FnGG_dV`Bw~7G4n-SMoC^4j7W4Loz&(~Bjpm}qE+gI z!jNN@0BpQDbGC(CcxeQ*0FUG}w3@i&T;~*! 
zKp@7z^0;Hs+!b1)z66dWAPUi}6OOHny%UZCgQ1aSI?ik`wy@rjz)UAD&%q*fPHYtw zQbLL)QIX6{&OrdPGCi|(U1k=LQ&QG*pG&&{Yl99f-*3;FE}XNZH%jkN@+O(3xh?4(iiC5>;mZieW~x!+IbAZZVI*5%j4soP-Bo&zV;Xs@1Q29+!#{eZ}@ ztQ?1reSQv%d!2o6j;SdlF_VgVh~=W@kea>bnrf)B-FvRt>C2#i#^jqrp^rjA>GU^& zf-Af98$;nwZE!h#I-$VQC=_JX8tgvv6be6^7eIl7AOnTEc?yNbdE7TY&B&RRL8lzax(yl06qa$I>=ZsgCsc1IUB+ncv8P`K z@fQ>pTnZ?F9#@h@t4lxT?eOqz$Q1$-XpBH5WjMen&FRyn1|z#yM|L|S!+)8cmG6)v zK~xoF`N`M(J}J=%CgZC909Uo=qL9ii&jl0ZbY=#vxk)aaS}8k=CpmuvyGZeqEFgy! zS`9WuIBGH)B}nnbD=xVRTY#MBl7%~86y|c#5lOCsW((}>I#Zl#mv+4o(G4;zE+CN; zjg&w_e93#9ORCLar>HthTb?Pq#F6bZG*Zjel|VK?mL$#0mPMDOLGg_GDcHJle6>Ra z&S&(4qIG#mGe%N2+ltFg#cyP9$3;FQBe}Ok^sjVYpH~CO2D}j7AEx0^sL*7L}bNV*!9eBX#PVP90zq+_8}!zXC)KQ9d;&*ivaFWT@zh zB@047K`=rV0elD*2Z|b+4~#<&%!*6)Il5b@-NnF2Hh}MnawzC8dwCnf(P_^NihxsH z7gEje2Z_RTRKuKQVL7}Thy&x3teWdDK!q1*jG5_jEi*Vz68gMT2Jk+^C2+@_b)JKcYmV|QM+Gw8xbUclUgJ#ocIpC()4gI zeBPdVrP)H9{>c_fc60NE7KNPq;4RH}*CVF7pZ?pva+P_Mv*p^W&E5NcQtalYaz)6j zi%w0(Frbm~8n|%bJJ;3;{ei6}yBvSj2#*#}vY5H6(5CjNs7$$|ga05M&<}2sjfp&l zMm*{w2f$rxMZT_tmS#7+5~C3osF=p&$NV-v$IS!uIW*Glu&-mv?u0}o2EXkj^Fc;F zcW|9uBF3FMPK`-XF0eXD){R`bMToL9-Jq;5?qkx2bWY7$7ZH#W99#~Rh}JYqtIP@i zFG-rf7;5w7!e&~TFRcIuv8ybktbn7A(}@hOG0EVHrS7g&y(z?j>lRNcEDF1of(;aG zPr(P8dH!j9?a?L zFO(!b5)DKSUTQ}yswhq^;RbH`UZgo*BZ(qex9HMD*-;R>n7@j!s1nRDg__bgCd9l| z<^v<%B4e6ek4;3fQE7>l;@=wpguxlP-CLZigP{f^i1`c`vJp48%ESjdtuk}D+)l?2 zXa#H{{Rr@9M|CR-)Lr}MYfa6mQae}_lA)EPgSliVBJ81(OBDAcwTEthA)>kj%Vo%U zq8bE5Iz;J2hPWhc>naw2lf8@leM)2O@z--%l2m$r3dDB$75{u{ z$&%OSx#x{xJc!}aDGBH~UH5i3%3L`(Ht-yXbXy~E6!|l(x`C>5;z30O(V(J#Er?nh z?%NLVZ;Q0z^g!Ga2(Sz+t1gS=-)A{y+qu`9Jw_L*Qb*tbb1B@0&^J9J!#B%f3`>+* z0bb<#|9k^}ZkXA`rWxiWu52(Bsp=8YffT~TB85bzlpTdSN;cFIj+7^a!=lslJrOZ= zJJqs5X-*%})`!JL3Nr=OjbV39HyQ5kL$fXUQeUskA40ESZKj4vf!`(o2#+_;kLi-fBNy zu|kmHfE1jYE&u7li^VObE=@rdmnL=hU|&7{2DEIokfb_UXR-gw6K*GvPMKI>9aVS) z3M`72(W_~x78((j4rJ6Fq%;YV+bAtF2aNtOExMbU9072gMPql< z=aMULG*#WvQn5H3F1&5zEm!{N`nB5OH)6f3vq#-%KId`dO<1BIw71=4PU*IeY*J#- 
zCcz{i%ae|ANU%FELPfpK9=Zr~$P4zDi_Cu!g#6kfb8xpTy*!s7RzughW1z+EyBNE~ zWwv&)8H$Gcj>YB#dN4Pe1HX5Gz?>v2d%I`wAj$%7hjT$SF&hc=*OjD`d*q64n-~Ro z95GcctH9yl@|$r8U4Fm)_-6ArM)U?^oqO+<_Om7C$izjlbk*o~RTo$Y;#&Su|R%pq=D8%g-fdfFRq!9c#+zHy5gS#Z%3B)?)b;(c#5e>nC-at{!H z_NOuqYjL+0y!WqpgO~ZiO95E$_VKGoA`!kta~i%~v5?SuE^3no1Rn_kh8dQ#|HG&4 zzi-8GxX2E@4a>=Af44{8hKJ`?yWlolm9BZiuDA_b*CPA*ZDtqumVem6x10Xn#lam1 zvdGfL<5$_^Z^tIF$XEB@a6 zGp)d ziurj7{)ma!(!EXQV(||;;4Y&X41a%@xj6qmCJi5FSKo#EZJ)bMWBgLCQN!J3Xk?;o zyW0#dZRxZHMf|44Hg=CW$X&kJ9(IrEiJb$&Gfu$IOuY2MHM;=8U(G-h%V^tjDltK5HL;4103U8Fu4im~0;?Yd&W! zJC;A}>K9F_|F)SkUo9xy_SI|6XFp+zoc-X=;zJ{enqp`t7n7OuSq0==*pPFRp2S55 zs-GP5c}eJ>6qgeKqYi!!k3ESK-lg`VC(S`Q!eZG`D-4`XhV>|hIXN|KK@9C;H29*# zUb_PK8XT84s3~Tl17DM)%U^DN|LTY=D?r_kW5#vbDs{c(JU~rk*loWc|A0Df7F+ zcGr!DVH39EX>-7jNC=yBhB+lQxd|v&gk!@YL99lf6^YIXi8Lp0VC^KrIsSUtxnkRc zcI_KEnFoRk_gf@#M>(h1Wlx*x>SN^KQyMDUoRshU>l&M9$N!j=0R_*PfrI7Vh+5gX zdiKai5JBcBu4bX8h%V5i6D?UO=jrRrr2*1GTW3Hpc zp^-i9p{sBickQDSQLK=KT|eR?)xg=ZK37`vdx%U0O%IKq8UUQT6wszi)u2>}j&}Kv zu?BT|O)bHDAsUUy|2X+zkZ$K2^j`jKa6$tre>XZk199?#%39hE#0qXue~$S>d>gJM49{yzaK{dB|IH zhrQ!@zCO`mPye%5VSCT^3hWopn@^w#6ZbFQ#r9oe@_H^T3XBqYvN*akgxdr0$b>d} zjpDN1JvADv*&(O49t1lp_}+{{oRXYO#kHo|q`*5=43k zNm#+q$f>+2@bQ>zaCkC6;-GYdv*kC5n``BF!Jz2m@4=mD_Fo-^FeL9Wj1-5LlWl}x zd+Rt_V8~jfjX*dq@HvkImXF3(sxc4ogK4qZLrItszUpj0#C1-3jNCn?BGG3Krv#d?&;RZ4-1x1LJVFwZnz+$*f zw&*30`X6@qOQu(rr%z=HfQaM*vO$fNuD6QHlsYXg#7-uN*awAL8FLE*3RRoYMWBiS z2aG!!HWY*nHKJ*3&PXL`5uzX?XH*B;*hDNP+X|hc0mJjn0-1lu47rYs!T?4+krU3ol_f61g>bQE-uJ z2%X@raQMj*<>Eo{ke{EV&zcI#$Px03&hHf>;;5Ly@~NC;*2DD=b52i|&?c863DV0w zhC?IMd66gjQgR`TawaAUYcPU}JSKZ4Zn%?TN)5ylMaERsz8d@HE2R87RED}=n3q#) zBa`q_F0b^>nNw)05J)klQXT@dgLBRsEv%f^OLu< zFj>J33554ycabAf!9U)=;Jj&|kos&-2iD{Hgf6ba7s*{l6O_oGoIMaY_A#vUAbC;jGV4XTXt{J(RHL zpi)qLNIT1e53_|J0MEca50z0J82{E$U3ZIKvP}OH&PmxHqk<81l7-UgNEMk5Pmv8( z^#olKwV13)y=u((I~TgtAcaL&qH{yr!Q5ziy{l8(y`8e68&REzfJGD{YRP4M9YR$^ zZzIQC#@!O9!^(lcA|yp3)*@&dB}D>{fD{3ZD5S{#pl4<8f(r4dcS%@s+v{enOVY|2 
zZ{Rm~{Zf1P8|JONN4Mh6bD6!m9bdm2ZA&{D0iLlhwZqzP_3YMmM2U@d;+y6`XI}Fa zZ<-lgzGo}`iX?X9l0*J#T#}DXcpItheVcmQ{4w!B+G!ESFrat=2PM~~K?ojc-u)d@ z=(ty}vIo2iNAQ3kpOWer@b21LLd%TAa+I7wK?{Nf^S%1af=-u-?Tc5norw^aLx1XzneJ@uMhaZG%x(?GTT8#J6zdFqEDX8C&M1eX5ufBft2)92c z4q|(PHc%2tQwjA7;lS&j?M|&}NY>j#}$vmN$a=Zs?3%`QGmlh7q zZUHW^U}!2bUW&>oGkQdH5|sj3T6u)b8GEE)3grv^K|#wx+_)D}ywX4?T0SW$Co6)D zpfZXavcyuv<(6O0Gln_Y;J{9~)3z!1v&fiLX$Nr4`mZA{5aD0+7V6gfJOZ2y@N^jk z+l0eyznZu__7tm{+}kPSmGdRMYE!H`rPd!%S#EoLWGahFLhd%+vnFmwePoiHwVPxH zg7|EmJZ@5xH8NK-@IleDFsUVGFA3EgIxXam9C74OPD|LD&IOyvuY~!Yc!d=x6_YiC zFov8JWGuI96M!S-)kw)cPWN;0rzo2XZ_ax7zG_~Sm;Mk<& zYz^BjP(9f;usm{5v{eZkUJ7ZhDgy>R%XOl!(thVe*YZo2GPFo2AEXEh+Z9fBy&YX$ zP-*{CTwa>W5K2~kOP480r$Zx~xNIsnz`PFTR2!ya))s}-YMx=aV4C`M!JV?;lB|Oc zx*l&skzF;1bBKgcR1C%9){`@;hJtumDD$NSbv%BAfH7_~rp?>!?VsRT-fCz3%XHgC zRON$|$pIePd&rF88sLt`u^}1thxjfmwXtoNAwCTL)Wj#EFa{(-EIIq&2t|494^$Tc zRh$aFOnQOG|B<@Suy3@>Od-VlE0h~EZ4VhwhijYuZTe2x;KFGs_|Z{=StDp;2feJg z8Qt|<9@T6Bx9P$M?9n-+R$55C#8)Alak*-qYU;=M5U zE4!KnDaUa0Jmb389{I2T6*vv}PT(~2eUyg-z9l%>4Vxh0-#*Ip|4u;h7}sO@{l6Vh zlqMR&4XD@*vB@Sepv@wC*=Ex#zfRbM!Z+;Qo4MFm6dVNU*`q!)eXE|t6eQe-D#QGD z{s}s_M$8ws?laVIfp`4-B#T+R#q88O@FXV`4o`9kLlkW(2fl_HK4CX(F*_HihkZ|S z$5wrA_R&BgwypS8{j08iDk|?}{daD=A0`kp52yTpiajPAwS~jS_k3x(@&2 zA3N>~Gjg{NBdW+T+^QMlT=f$?(44k%ts)@d^r#Wir7VuvmD}7u9!?&IVYinSr-F?P z*RA%aWRdid6LoS$KrHeNAr?}TN{z1H9z-Gde-<){5K65DV)pB)C$tno>V$~aq^)R7 z7{8M3(o9z}8A!c&bd@f_i!&hFrNMy?LG&CoJBPk<2p}$hk=|?5j$I{Uo0e z^D-Y4lmO8$RWhK)S+#GQ;NrJ!UtT$Gf z?bn-^*dgSGtAO|k_oDPRs)}USnjEi?B*Ndu-WNKlQow(N3$l^AZ0`M8Y}qm}k!Gzh za7=t9yMLW46o(J8(H(O(r;{-3Wa;?2P5vc&TqMUGY;wFJbWA)m1ZUYlaa`Fyy%qsM zknz13Ci2!d>0yRL{7mXNS#@;%RF>2I>e??(?OlZ+#V{(ECKn%_shX-<7bcmT9nm{W zXsRk%L6}fp7tlahMmc5tQ7u=9irEV>S>#+1#w28dQzDV=7$Dr~&`8qH3u5h%4dayO z_}-YJq6Ecfd67d%6zC%Z&G^bt&QobvygD^nhDwKuD4~4sK%xJbVem?1JII0=f9nP3 zqAf$b_G`C9r-d^~tIefn?f8gPcF)q7)S1+&G#3do5UIni>9~uiOr1zNVt}fX!FWwv zEbNV3>+-#1!L`r9wH2Bhnpfs~OI&xwJUcD!?dh(Y*SsX|^}~ua&#o%;PA0kB?nU1I 
zj{E1i&C`m#-#YHbEA567@0~n5pNoCw*|nu!UAMY8d|4op=Rp4@ZFCVS=+8FZ9Bk0} zp7%2%^G@)*a#S_H^}GX}guUPM?i5S$lrrxk=Z_>N_bTLPez`Y-IMoBny`SgzPjIVy zqR@7fdp&L6?p{Sf9Qwt@L;dX=<=$#8D7v%4BjM*tQbBpAh||M}O0Qo*c-Qg+2xhqB$9(ycJuo4W#4YzsZM`;Q#bE2;tThv@^+6uK{UTy=;utZ zr&Zx9S!d@~dDCTNPPI2no|ja6)2Z}%wRd8=)l<5A$8}p?n0BsE!dHg^7zY;na(C~u zo$uQ|H*e|TY45cY#LtH_I4fezpn(c{KgC1jnq55c{di#)Z-~3`-7AN9{VQ+~!F@_tCXP~V{DC$*a)_4u%@E4f zzH6@?;_WUzBF_);@DXW_4h5LRDDJkaH`kfl{K&2#tJBi_uVLPmj{C{$_Oji)R=;eIv*MxpJ2}g4EGL{)_~MWdsse9B`n&5snrzjkCn9_O9K%KE0r0uHRi{ zj2xo=&I~SwNzg(fYR&y+hgokAZx?5x-G2}75%=!7_KQ8dYf2}I^aEg}RN<;!Y~!9@ zKifZAQnrt>_7ZfSD;6?tE))NI$>q{#-^lpj*J$*C6;t3oqq{nJ7*#m7Ppc`g-S+bO zy3c-ON9^U*98UT`)2LZX2G&$b2`&~sN2ubB~A z9E49C|LvoDvlj|%VubgLuN>si?_`i`WROF5ILLfX&TiivsKdUK@m_H~?qS2ebi5UQ zWc)Wog1x_!@vdD0eD~hrc>4igWoGmIS9A12b27Xk0p$=0g+3V2wN%#s<`=| zJB|I2qdy9QAEE-0dkd1>HAWAZ3KJy|lPx4jMK`jPv@GY1Ss$rgl7t-x?eio&4!v~* z$r(bx|Jmu3R}(v4UQ_v-yV+@{XezrOgX>h(m|RCmHX!*_PG^BLNi}tI#5;v!xpW+2 zE2s06At+GP5>bvlhlxq0oSNHnZizli zrqwu!K4bQoy`-i`6q1z4m!^}&V-qe{+B;P3fhX41$h3pSA#0-KaFC1=3@$JoB#I8^ zm8N&PUGHL(hGE;WH%>`(o0h6^iXxKSBCBud*$sLQdrTJVbSBDzXhoM%LFoz+lyz(< zc&3Z{Dytc~cC7wdd?_&moz%c#t|%Y9z%KWK;wv zB~Z}jji_nz1c%xYvaovtC&-ZyKQt-K4-5;|MLfiXC(vI3z9`y+WKPyfP9NwgwENW* z-W3vw#Q;D=<6X?+B>OCJxtmgyE!?T;r-azM0jeaU%&0J# z-??0I1)i(RRWvpcWx0f4!>k3%MWEWm={YP;iT!+fRaI)OkYbD|O1q)jHJaL7npNjA zmP;Df)uI_(AWs#X5tc!6hGk-I(zlsK5}|Pch8)G0a2K*U$~lhuP9uxxJ*XUqIj3g0 zo>U+_u-{I?3xdaFCJkJvSWZsUXZSJ9IV)2xo{V(QTvZrxnNRFSZ(L>`5 zFup`iS&};>xfvx*-SVyi?@dSDR$prU!i?u+o@SxWiDA}%uwN! 
zVWM#Br)i=-oMfC9!K%=xr6+PNu*7^XK4BQ}&F2N=&=3t!*OF@Bk}^ptgz1m=uyAeD z9`PSeO%Hq80bVjC#u}MWJ?*n`7~z>XHROu4hc|i14kVZdW%@%6HIW9vYvGq_CL0Av z^0`-112dF`)ol@F>Opurf}#N+m(MSR}d8c{VsQBEJdvQ6^l@mULb@jrs;rl(O}d^7lH7Gaf8mlamw& z$5>51D4n0RcI=u+O)rKOELd+k2&{B@_Q}y+PXw&DMti%ZdWN9|F@pb{2pc(9Vt>$Bgu&RDOHVIv_LsOO~!3`KSrZiobaS2&mJ|_+c^)d1{>4P8tVKAhC>&X-6kuw#N954a62h>Ev zlO9$7M&>H%kYG{X3u^#JL1AZH&l->{ z`?SePpYc*sh>skS>p7pN?#%;O6HQ5tME(T4`LS+O(z^_Dm|yHs$`6D1rowdr>&IgdLV3-!YR1aY>o z$Yo2a4|I8fH;d3;b&x7t?2tkVx8LC`u*lW2h~@%0t2NUc4q*1*4|_Y=pN#V=xi#{% zap;M*#BKXH@1UZa%fl1czwD#t;RkyUJMP?1ZO3?&5lcR8-ue69633lUVA~GyE|d>H zo!}+hw=T86m*=ya?KKm;{V8Z)p5VR49h_Gm<=uhn)Ax_|4xyv@M|;yoULEIjBbE$X zTbTH$QTQhH7%4D(%@=oYf55@8nv6L8VXoc(7_XWLf}b4Y?M##B9fKn5Ry@|dZf^T6 z_Q7Mk{c>-MFC}(i%Q4<43XD9~8%}{=9_yXpykVa?*6ZQCZr?f9tLInhIIlO4YmY-8 zbDMqsIB#D@S9ClIlsoMShodEX!`^p1^)9g0lfB(}OqlEqF>f$7zmAAt$>d%|vh-P# zy=QUh`0)u)-wW(nCwSv`4#%J84;7zUz<{W%%%HvLecTeyJI!mqjxi`j} zTP7To=0DP6x7is-c**J+bjN*PdFbBcC}_)@g5{);H2l~8+&hPKzuiyt2GH#RCwhY= zZ{ySxy(4gAYdw*PK4@P#(d$L_t4$}OQ>wSSo`m-24tvZ=USDQ-?n%_Y&2Bo0X`gQo z|EWhrw4L!&Z=VVopuoQ_mMq3qNN(!p`ht_a(eYsD9Wj1yKH1xcq3?N$w|CaiH^gLy zx7l+}@sjS0xV`=q^iB)x^QU+RFb?k*-bluA$S=IZf$hRya5lYRAO3~+d;8;~yo3I4 zMU>YAM0x!y5asn9AcH}W#}t^=aC73sD30Cm6v7{Im{DT9a-66x5sr~(#hwJy%ufl@S1(LG+w=&P0SMiY zaUp~#S6pvvehDXXQZR(c>UthXhJLq$=`7cm#EBYk6UcbEsJj`yoXdffCE<$c7jpC_ z#94qjN+56aEDC8ntUl_5zvk4K6P!`_Nvh;Uj%gfi<$U0?I=3K&`tn0Wt<}mKlGz&7 zhm)Q>CQJpR#%VN2vqHav7668}WBy1XyxpXx&V&y&@t~+UQ-*L9F@q#dBBx2vhc?D= zlJP=V%|H;6b|v~%z$UT>t=`*3A_~@`$stG`CWR(#pRj zArsUVBgZ2}aMFwEWJHl8BcMvwxX|3bPPr5K}+u_#iFEKaDLdM!VWJlD7BCM%IlS?ReOQ3 zabTc^Qp%EOdH`ka*qV;HmYsx4Q#4!A(@kMUCCJ08e#6xI`!MtA(>CK zrnm?tYF@EZltPFno( z;>mB;D<>O5vvac1pe$G~6q_Q(aQn7GD7D>!b;(sHl~^!njd(7xyy)hndYX3er=+7p zYaw@SgCiuN%Sx~1@lF~DlF&is2*2YEpzV+u@l0*V;Wc|AYd!+aO*$XmX03SF%yYqt zZ^(CWf>a1fE)4Y6ct828!P4l{^S^<|VbW5&6rIiNKt1JzY z#fyJ=n+27XosEG@!74wUt59kClK$6qsUqmd5(!jfHv$W-))gunx>S)~H2Ifo`4$~c zP%RCWj_ryMjr=o&isaGT5vi!nOQy08%>eaCtf#2Q<_tko$1V}fnJrI1RiBsMl#)g) 
z4J{|wlw^s^yeg7SDPj@SP5;#_V(@Tii}KC0vNKaoQ(jV{l|r?Y;4fNINhrp%F%Okv zHWIhw=_<8kbt0?Os;%=jYn58_viF66d}!n`O@_v*Qa%7kN(P51H$=s@ylm_Z)@fxP z0Krr)#1lUzF+pN*FcNvY$a%ki&no7y=B|+GZ)~BP(XrvNn zii`#o5z7y8D8I1V@P2d`NFn32dfmPiRVNbB(4 zv7SkZ%uMsm!Xw`+Pzv&`G-k2ujFLoUOJdt(br00`nq^LoeOld=F-wroOoC?6C53ua zW#E?Q!{v(iLMt6~NDS=O!-opK^)-^e-{AT?LQhpv^s#RkRG zZo+a)HD;9TP=lRcy>Q6%E1^ydosgrN| zm|V0v4^==3h)L~5;y1?4nV2sLl~AgYghU1di@He$)0cQb5ut$S9C;#c%;qUB-~DUl z4XwU`+a$OM26*cC_-9tSU?&Bhlrh zf&iIiM)ejoe-;6T!E8!jYN~;NklqZduUQ(v>44Lxcbbt?<-^~3d*#>0#i{0fJM=8? z)Yx4{a)OY?!8%Wt?QiG&qN2e5^(^ml_g@d%Gk@>B&^ur?bHi&jG;>2*Hpv@je|9!n zu$lI}v%P%^9uQ}yQj*#{a<=z}ajo~DcXYQ&YSVHpFAKI!YV*v?c+KVpZzT{`_?($5OUp>o~v&UIu}=^1#j4C=i+Ph%*%H1x$1)S>bc&K zf+Zi}r8mHAvXAZ)E4R`&6-^H&|Q?KLCCqDV) zgNlVt-kPv28K)uf$rtHVG|8%N7XeJOr>hVTJAbO+c&JvE$U?NwuvX&XnU9))oT-wV z7KTY)42q_XOqO;^TKPhCWpLsOD)ffH0F=#4YLe=jsW4vR2*q4JnPw@S>3at~XB5nm zN0Bwk4B~cir6+rrjnP)ex+srv~C{2v+PQ zUjx>lhnrf#P4(|lFfj!ITAXG%B-HJ!msl+l>;dt0FgG!nVNO3T0AkprPUC1AE@}*B zCl}X(5{cmN;nx41DUUJ=JL90j$`o>W1xCjAq&L1r{e-O)feT-p)g5~t^m{u zWvph2uYxVUGJ@#aKvq=BiY%w{hk$nH0A;rW?Y9Qm8G?>Gizkvmb^^zQf|k>DThNNr zMh3KL$TEX2kmUr)K=%3`@ox5oWcR*fvtZ|L16xj%=Xs-1n-5Ik>i3B~ zJ>^v-Z>ucaEy9KuA4raZTMCmLp$7+t2ZuzItDSC6dB1cPG@Duv$H~8aXn!>Wzkvt- zX&2A%ZYZAh2^xK>kD1>1 z?wL!4BoYlw86uY-N9zUy{`9<*s?(zBmiweg4Kwewvx7o#D4d!GZNxrkRYGO->Km6ZGWbH~B#a z;|lL4xGXd?I1<9iz8eEt*Nw5+qVz>1H7Nw}pR(&{VKEWcWyFDlFKsX`c74$CeFV1(|(o~M5 zQ*9{RCUSnHv=M{^$j%>`OjSZ+S!abK-0(;7Fknofo6bKYU!fPMVa=zp9TgxXLl0C4 zm0pUW6axT|-$pjM-iUoDZUFomUnBImU5l@aAbW%r;GW1^!%Z9#WF?+Igf|49KVZ1# z)q5ejo!gA!e2HZ0kV2a{yLMPer&vtBHyVh-2>OnCy5aei5o?3)a+}?Pn zsmdWhFm4auYASbWj|ZoF6^*pUg-2=U-yr6W&Z;`2?yBsyKkeTAM;bGSQh?wHzcJ2% zxsJd6b@3^FZzY0Zz*}QHS>5TFTac(igq4CF+DASAR?E3f1jVMfZF;7x*RP0x=!oMw ztU)ME(gAgL*wL*L1>9SRAq&IFDK9!Bd6}p> zyf~r(Z~r^c9`#*8`{S2!z9021L3^q|JNrMDOmB%(JoUhHU4&!>e@#amE}BNfM+NtD z3H8+^8pUR|?$}WZr!b!?YAM09h@;E-)45-CK)ev!g0&6acR|IOJ1P# zurGQ5cCSa;JOQ`YWeRBz42{B)eb2onnaSKFRf{^SW+tYy-@dAwndjY7v!JskYrQER 
z-~!|$SCmF2AXUJbvx35zt6VwC!7;F)Fx=+J3J9rtaDZ31T?TfCyK}@tS~GGqIhqDp z2|7H18&fd>DrD+psGJ0pxtbjWyvwNj1S4SX}C-YTapL z6Ww4l^2`-P(WwbTe6u16*p%Qddo@Y0PMzV(el0p1jWiU|LD0*euWvRI&nkA+9xlo?Wy*~Os*fwc! z2ipeDRJV;J(&FIz-`h6b*byMV0z>vpZYD;vmcu=RYyGrX0~uOjS-KRdC!-#Dc7hF* z3FGX@99_ZYNd>#-KiE8Imvr;+%xs>8?J%s}(s(T7Av2LoC?{`O%@mchrgN`U%%s8x zKdH*>x2dFcGMnZlsgtn>N~5xASaOsxvT3k-h=*;kX;{CkO_NiUV$+BxVrm@z(~xDT zMR~xR!vP2~>E;p17YC#g)4_l+QNueaW~*N^jEHhqjW9!c-WDY)##x<^g)acsV6|GYw=&(b1*HbcKhzOn-)nc+cU!t|20u=Af}lN_ z;B*_Tlc!mVwll#0lwidsh@}XX%38}HWdupEpjtb&JB16Ur64&K0>AEZO!d;Qy zWmg2pP)L=d+=t59MgU0ss3c|txG+SjU{R>vWl?}=fj<+F2V0p;kzClr_`_|@T~j*0 zkL3w$H^{S`?_ARQ-#-?=$xPbxWcgN)#WLNv8)SX;OhlC?fM>D0YzTyJS*&n>XBYXd z_BRS!-QPSj`+L(mwp>|ye+!e6nTWYB@WwCZEwkBXf6sY|iirAw>tlXqe|N3ab$|a! z>SXpe&2F>5#l7`QQK?MQlL*5CA4^qbk3YPmSH+mB5olRN3&r4B-f$bJ(k+Xn7DEC3 z9TCb(^7myv*{Jr%@s|J?>0i4~P{rVWwFV*dTRCDHWhm zkmf;-iF7WItU8%v;)++Y(TEZQ_v^}BvD9cFzl0=lh{VZunUp9W9!38*uYSc>uRhnK z{ok|tS3Sq7RHRojXS>xWjl*`Uf2Y*p3btVNbC^uPB&1hAt4?P1UzR!o$Qy116 z)#+ERzGh{JnfHa`7bXyDh?9@wmIzc&Q%nxl zj6z|cQp`>zgrwS4wf(kg2s`6TGFq2(mLV9xv0tHe$@KQO<>&!4A{q+MVIc@2 zS{KpN2?LXB|Iw|idY=vPBv*u@^iwfsxEwsf8lomngzg3$HvkQ@Atgl*tv}l1_VRhA zJU0O5q^hz+d8g7wwb610>D_n4Ek@Bmhlb#&?@mK!keybym>`}ot*$aZ4GdN7-!by) zl2ErASuDFesU|ziBNJg$hqG0T)$&zGshFs0+Ne*e!8=W%9%`WyE!7^LYTc>Vs-RYY zgo*`Lr~49=m;oc#T~!O1j8sO-4Y)zgownUV>(a4kenMq?#k9tFvPZUFtvh-(^cvZU zwH}>I4Cjn?UNp5q=X9IqA{Bn%aoS>~+Bca12&>vRTF%D0%wRVE8C7(mQy&yXub_w! 
zr<-kf1<^s%WamnqB=v>25o0}WT}vjKvG5s!^6@w5VnDS2oYlItG zj1WE2+gEdo8@6u-t`fmkE$S8E*13802b=f9Bg?_~s2 zp-=yp(dgisz``cUm^HN|PxAh8=>oE>-(90n4uRE~&e+BlOU6G$+#++4#GDBIGZ-_| zKSaI`KEfeUd|LPwMoE56FV5H0Xo0fwo6arGh6QsI`;W|x%iEY+khOQ)gCnzr$yp}= z2oH@N>*-TU$K*KbRMeAeuPZ$PqFv|-{)S!X$yb~!nZexs548Ctpi67>^*CsXr__n~ zKcFWB+H}$rAxv>|i3nU!l(iv6X_F@eiL|WivWZI@R3SAhq%PZ&1!gIvL;xuCttitj zp@GCJYn;BeTR^^tY-%I!mtWaz`iSA|u@8;(;$Oicd~?b({5z#QGh{i3cP{6*MtQ_( z^FN_Hr*={vQJ#G1=0F)QJLoGn$By;}F2~RS#B6)VeKB&WDU0~kzOL{-T+r6IA39zP#ngz zAM6c|1TmHlp?cdLAF6?hPNDoY z9PpPADu`g$!An0n{97-&I4c7tE(=|zk~Ux<=};aiBWq5}q-I7iCvii1dVz!%dL#uk zQ(851s=q5$GZGp_^h{TR2}3S=80Eyl8adqg=SvQE;eDcVxZ@hO9dEAMqcm?$2|8(y zi!YA&N4Wlw)28@CBuPrZsUP=o?>^tp=I}DSdRkwkt|Q_qC{`Fei87}@XFCQF&R#W( zAR3 z&?!Xyo@e+ye@mjF?-FY+`F{#)0*}A%3TvKw3anA@zyHHn(?vlbEWOy7LdSx5#^|fT zI8H{O#MS6#waI?mB{T@%eSC3*2zKPu5{LD9*?55Q;mvk4VJXC~1=VJgx&T>^SUKQAL7%m=2n zj*~kSW$XDZjdVaBciI*CO5dk*A*jF{6H}

erzTj9u!T*KOcH9{^oA*YMvU%*a)skfCO7I(dEP67)J z`xWdg7)hIJ*wv5l`!5XLGJi?P=_2_zxDxZ+$U=tL+iu8^lfXj5eh0e_OhW;~E*@+{ zf#6q(oW$D#uf`KH#4eB9cp^0KA9#D<(g2=gyWPw%&OU>Vy6WSwnxXIVbvCPw7^=uSjdWDh4vd zhVWdb#h>eg{V10XZg+>!ti_0}agvT!JNQci7vS!XTZ3^KVsG0VnFBoX=EFM#FKLu( z-kH2KWi~i1Zw&8TycE@J8DdjOc8Y3f-V5<=#jBRwynFE8%)Rlyb@N5If55HUB}42d zqDAcr&8xT0x4?}u%v+iFQdv2pdv$!Nr1NIW5c`Ro!W)`bt?{*ZRUysWlLu2Fhn@wc zd1wBwtK|G}x525|ks)>$DpZZoyan*~!mAZK^NzyHpf5P5dkf+nj#p(xhS+*vR?J%n z?@P{`Yu-_V+;H-|?u}oHdj)QWP}#D??!~R>o0}h}i)_cMf@a<|cx&_Ej7i*l8Q#Nq zRmWtAmBCw=d(C?}-oNl_G;H24Nj5Sd7@~WN;Jt7m`@cwM%M$z4mEq9bSKuzW(3Mj2 zev7mUX{K&2inlIabr~|mnsP5WADZ_{yf@>e9Ojz0I9|$OFjV&z!+W>CcjzST|8^o? zy4l>tao>kq874#QJBCwgL5J}ns`{ymWF3?2=df%UVrw0ilfXj5t^ykhMhfK`wwz~? zLcwtENx3@8_;VKSOQ>`YaS}!0S4W^qk=KgO6J4&p6WBfQ4={MSyJzB zV#4e^=dT_cRmowqdkN!!kbwNqrvdch-LG^BP=N%{`w7qrPN<7g1t88sD#>PZbU|VL z$COn3(}-mpRB7g4EL1OY1>g=|QU#!PRc&+XpQaAR72-pFFsXjdX$6Jvv$k((AuJd# zxl1t@15+X#VD(I4#Q|RTRi`@jtUB-t>o~z4xo#cLf zQYQ=aPp@j&<$S2+u{NAb5q2|}7UZ1TMARxMqpt%~zTSS`eUWHbKjt4-3rp31(MSfO zqEoNnVrvgC^Kp`(|p@`F1Q{0%8n&oJeOVamnsU@8xdKX>PsUj=KJz=A1KOX1^V z{;B2J08+|K8syfcUF4?3#a)H{Y5&yk`3Vud>{G<<8bTMjslt>{L%@{aYwEeL1n(rY z1ho7xtOV|C*r78tX%cxX7tc6mtb)gy(#v}Jn^nagK zV$%_`zXr7pOtq{wnV|3zsv*rxXh2yEI{{RwZX@{>K1AiB4-2xSpvdh8P^8=N^MlyDnh;LHEU(rRSqb9Z;BwLfL8n7XwIoCadxaC-MEZ_JsNu zx7-nG->WPDs^l(0?kF7V9u@=wG?ls(Krhmyk?Tc%Azx_yLIc!(6!o#0-Py(Fy|VVk z!7LO?)dIN6$7YD78R`FAC!KWTpIQ`Q_T|b_U@GYAYPfFo(G}#^fG~#+3^F6S}11lX6%#*|o04o6s5-tHyI2?fCPH2E;o9h5ne&A ze@0tULeHvqw>yb$KqCxadtm2vu9e=*cS@RT{JT&RH$&t?C=p-h(6++id?k&MS7<;Q zhUhCAiAExQ=rt7PPLF6qE?6XmbPUA-l-fI}$qK(9WT7WCpfQHZ0BWmFRB}I|b6=d| zSsRJPl4yeGRtPnX`767x?oR+vjvCMuM|S|lZmy4Idl$>#6)D_;V*r3+*V@N&nTus8 zz`i><4!|l|H`f1jF@&nE*1`q`ON8CZhlyaSu2=HKWQ84})aZ!XAu6gyS{EtJu}%e7 z2h%s%wPufY@u#H(+=gSO*K0{(mfr1w^?w|c5^+10*`D}|aw1{^5)6!kr2<-DnD2oz zv~vPlF}R?l?GRvNJ0tJFvlvn}=HjKU=3Mt@)cyw3GD+QuV>JN9J|4NSjw|;k|8T#q zX+%qi%@As8?z+acoegqaOR`)Ncj4Fxpz?k>O^w2FFT0tG0j)6X^T3t#y#gv7ayxdf zN#Sk`$MpBw)K^uZWmou@oipHPY(%uiZ~_U9p35I|-_I;Tx6JrFrU 
z=w3xFRJ3Vi^)J>;5a1JveU1)x!SC#tE!_`O?Q zzqv{h9k7&!Q1qJ9Zz{ZxezO6QFrXubs{R=V{Q&r0HvkS^mqI6i8vYp{Qq%Q}E4SGr zLIXNur~{xzq*52we{}I6-xsgXd%Piu`!F0I=VmoktE?Pej>^wN#XJCR=-07dSV#rfrmU0BVSlf2;P>$ zLjaQjG+wT9yZd#W@8YySYTJzHg=Z0j@}oHgP~qUJ8m#i0HInF!VFiE+Mf{i>^ezHm z^*S`54~};L6#cyHHR3m7b3FDDsn~eyKcm&TjFVPBZNu>6) zFS7+}CD9kdE&!FQGwH5;aqdz{6&mm;jy)bYNNNk5MxfScFDi;mb4t8aEs86owlBR3B!YwR#bce9qDWOI^W6Nj#0`CJ1$&OKD^j z{&>q)elkHHT2Gr!ay8t0hFp`M!58E{d+WN9gE8XqG`m&$Y&us z#X%L%=QtLm@G+)GN|H^|8ib*X2TIdf3MkXZTC^PktP&de9G?6AQ+Iyrp1P^C>%BHh z>Un^k0O|&=M@r#!SMmE(*AX2f24i^wLdCmpM_0Vlm=00ngDsL^s2UjzpiZ_H5(+Oy zLN&;M3=AUxG~0OkdiQf2r`WOA^1)Uq48bwQKVldIXdV@h+{2&xV9_HqA`@a3gpzkJ zgu-<^L(`INk{F6%5r8sgJ!3J2zX4Q6!GK{HRs*P_KRMt1k~h!Gg_ekZjZS*dBs4oQpz_#O%6M^T0w3a|GQ0Ru*1_z^$_?P)(j8)B;; z>7#d|U>lvfNQsXI`NgBp?RF!b{Eylj?=Y0z%h1pix9(WS1qmcQRM1|L1(L_d(IRvL7XG9L0mNR9o9 z9C0Jm0(46Q&@x~Gj$&S_K5wEL@JnB-8l!h5q;z3_zsdh1N}N9k?Ar58#y|bhGGY?< zRz*uG`v4D9xQ43CR4dpmiOCph0H_e2qrfZt0f4d=8ZZSzeIJ`_#&e3zbM$n~K7$XX zFcrg%`g63JJVhJG!*?GDwSa5H3m94k4zDFz17>jF zAOLkE$4|Q+w`C*CvtW-DUc`|JphOu)MN}AEY?J%YfR`|g0l1L*7fEGkr!uvRp&dm$ zbFX4U2?=zOG84}vP~||yELYa+wRZ_^K!9Va2O7WXvZ}Aksv)09O3QP)us_Q*9a33w zAjuVwcv;5S>UoTK8P80MS0vJ&Hb~K0c)z{b*iWTJ;~D6}{u0w%NWIzxm0bQeZEMkb zKS72@&Vg9sHGj->uiBaypjRIInbyBe^fQUh1zq7Icw^`y_$2j5%h3XgzTus#es<28 zznL@Gr@N!v{k8|%f9HUy)Kl&b4bV7xod-4_@bBNv=?9-nLW#V|6SF^d#22492?JEp zwtJwLkHkP9iTzSg+V1i|0S~luKxlxHbdLwF{+98l?&#y98GIoL6{~%oIR24~#9KZR z1}M!#4;0zzfLeSZofi!bNI|Xkmmaumu>;!XxYsa1Rp4s_D1T>uazuUq9-$=E3!U&1 zAARE`d}11)PA1m_pMBwgt9@D?l!AJ=lO9;;Ut@uP4FfcYJRSE$z|VImC9d}GaYz!H zz@7KNgNqzc&%cKO>V5Jpb0s<+IN;_5E+r02L5*dy2f9&v^oY7MT}l`bzZ^>;PYj*p zhzZl2#1ToT)hXhEKDiEfkECK-DKy{;fZ`sQ^}PdDl2ib}mr_s%SJDHyzD%s}3UMRU z^p*8QJ|>BZ<|q@a^ZBfN*WTZ? 
z*WP>W$Jyu1$)s5$!rMX+)UXIT{xd6`B`}soP2=qZLxLrO37hI>;jBMGa2V*UO0YX9qix^7;s-L{?6T+K918p57zB4 zST%xSE`B8?R1XQfniAR{vJg@NX@I1>mJ*r@Sp5AzffPb&ARUn0+LX{4kQk&E(g~TcE+sS{QUs}kbU~)P zof0|^QVgkw1l~aeAPXTSkOoLf9U=f(1c^f$A!+L||E8@+1R$l5CP?ph5dp{&NExIV zlJOoQ09gttha@2V>k$FSGDroa1v2=3L;wM7!iOhg2W+>kTmZTOajPaNGYTV()&|H0I~#925E+5G$8_zrI2z+0@D98L;$i3 zQUPg!4E`JufJ7mckXFdBFAxDpKBNlL2FY$l1R!1kepN%-Avs?n0+2#T4Wt8-`xPPp zi9u>1osbD%BLa{jNFAgLG9`fsK#C#tkia*H0AwMg1kwOW`4$m?EP})#jo)JY)4oFl zAd4ZTkS0j)7DNED1X2cRhGcw?2tbxX${`6z|4oPhWErFa(gGR$10n#4LMkDxkYTNe z03;t$1!;q1|A+|u=wT8-sfM&eayBCZkU~feqyv(>1rdP6AhnQA$b>dT08#|0gLFZr z{DcTViXruoz|V*PWFe#k(f~>M1rdNOg2W+>9)6{@BLa}ckWxq!r1!6g0AvZI4AKnA z*op{1mO{!Q2}u9n5COwKBNlL2Fc!r@jns23Lw>x zc1X^D5CKRbqz2Lf$?Zf0ATdZSq!TjX4@3Y`1gV2`L8knP2tbM<^^m}S5dp|TNC~6? zlG23;Ko&vbT^RpHC~4af0mx!VDWnO~`!7TQvIJ5FX@+F%Km;I5A?1(+q<4HqzBQ~U38bZl_J=Hllt3CFDLqm{ zQz45WaSy*5A!$8RL(?FOA*GNeNbg>$p*+YENExIVlF>UgG!wEEQVvN#`lllTkY$hx zNDE|eA4C8Wg;YXXA;bD20+4)26{HQ~WoIA)kOD|Gq#crzi3mUnAvKT=NNztw01|`L zLOLN6vJe4C5u^^%1)0(x5r7m!>LGyvhyY|Eqy*9cNg0R;Ko$*5^+ItdjgYiKhyY|U zq!iKw={*<`fGmNOL7E{Mdm;jmrI2z+0@8m7A^=$iserUV1`kC9AW=vqq!lu37$N}4 zhfF&kQ5c5tZ$lz`IHCY4fK)@;Avq%u1xO*J2GRk^9f>GFVvt%$CuBl4q5vs^)Iqu+ zQ$`^QkYY$ZBrqCLfGmWRKpG$^V-N)ozZOB_kVZ&a4q66T3@L>)L3)P~1;`Re8KfDK zu@|BMSqdqKBq06AA_|aYkP1i(WNA+3;M;}8W%KE$iSuQo{bco&irfd!Ci zNIN8FZ$tr72&sW|KyoJ_3XmA27SaisFcDFJ6hZ1BU63i05Cupvq#hEOj3_`BLP{VF zlQI4&QxFEoB1jz42ua%qje#tNltP*yz4t{FAWI-+kY-56eux5ODWn{dfb`!VQGhIi zR6trFgAYIyAW=vqq!lvkKt$m{4`F~(1!;q1AB1@TDS%W%+95ep5dlacqz2Lf$^9E5 z0Et0rA)Sy32O|QIB1j#i3&Ke@kPb-hk%$5$2C0Q~LMF^Y6d*;AI!G5} z%4|dcQVgkw1dc)kAPbMe_?JLwfTSFa2tXD=;*ds2+A)X#WHF=^(!})nh{7C10kQ;A z24P+^Fyrrt1Y{|s9Fl6Efi(L;_L-se^PurksmNoQv@mGbH0eL;Xkd}oW!f+8L z10)KmgtS72{S#4ukPb-hC5Qkd2C0Q~LMB{_ z2tbM;b&xK|l*PMFb#)kQzt_B=a4}uz;%BK8IHN;X6+$S0ez3w!W`NnA|l3zqeqdi)dIXw-g^{T?Q zT~!$0e?MK(eHFECJy@z)Rg{XZU~UfkU#cqlU*}X7YZJ8(4PriR`!GP2{ylj9(XC%v zH~f)=WcN$M$6Z~j!XjQw<4hXVstSWvRbf#001YzOo(fgbo<>z+&?#zP8k9Up+rI2( 
zjjH(ncaav+2eEAh57A-}+g2{>K;|~6irfxS`_o%-32pn+TeYe%NQgR=#quAf!B9FY z6LlnW>s3W=J5&!}UC!*Hf75t4)hbaB3c#Y-sbio99EpOs zHCRqU6g=#VJ!>|iU?>_aCm{+#6kDY>4W=YSVgF;#S$R<~zZ)zkAqpmctio(W!Srje zoP;P?q_OACMwEWAv7Ce`c%B%0!E8k7kM|E7EGr=jUf+$qXhx#oX=#JyBt*fdu(3+B z5e1*mH&{-BO3DuW!0aWOVI1S4;4O&;Q5a)UJbd;Sd)a)6f)C~zEGHof{#-G(+H6F@ zlg|drNr-~S#<4215oIK7EGHofCS&Xsvk_$!;@@Cd2~kGF=v6Zk1#eI`SWZHe9N4^O zHll=KV>t;?@RvTZYO|rzo6oBmmhJcrb0PWt?}^>npakx97Ud$OB1LDS_;w*1j7$g_ zR=-`YaVS7Yjvq(@$orK(3sP1i+!vu~~ zk=)s^Wx`{2LU=SH_6`jZgSaRNT!ZB#M8TSm)tQYbT;Z0J5G55h>&-?K_R4Y+qF?}G z@6smB%Bb~bPxvY1)A+%)g?TAXZ+O?=c2myPp_cdjx%oK*Pma|mx%e)6#V&foE_z}Y zy`6d{=Bxaq;5R?+Kqrdd_rJI~XXIXW@56xeIUx!*+cr_K$99T>t+ir?2XwZGKi4g~ z=-ek?PNs7fI-Pb4F1zHCb1psaiu2D|u;enlxVC2=NPZ^6pING-2hk7xp10&2y;ljc zQXLz36goeHp`UWq*^bodYq#= zKYTh)^=#<#ROd202|9bl&xB4@o!9X*Rp+N>=cvxlz0P;K7vLwg7b!6w`X#FG4gGS} zr$E07Ivc|-U8g#u;i=96&R3luyxy)lKOkMM`Y7o4sXiKd@w1+D$`2QZ-;gR5Q&r^Lr=(AMcANsM-IYj(;^&ikVPw*nZxq-7a z!O#2`Lg%F6B)md(PO4?l*(-jSd7J8dpW~!cdPf>kTWjm z)j#hhH;3i})!B%Rs&i7>f01X@nlzvD^9$Aa(!f{H*(MI-x2p4vhE1xoADdNYlz(=* zhYJttF7;8J(HyMvmWwA#^SO8iLuY5X zU`D)viOvM0n4<)vHePiO`xMo)p&zI^SIjikIVWeR&gn24I;Rh#ah&Sx#6MK$Xq~Bg zIuG{eX@bLavFco^SE$aVdadeQg;D5?2q$-e>Z~xPIwy27bPgk@%%iGvK0ghe5zK*9 zLeFH+xx&^UfnC1!NB@iua~}87UUK^%;*{DGI{n}$B^^3DhWzfCJM~ca0nph={G<*^ z>fJ{s^`1G<8EmvXb^K1fTlXo@*&j&HsZRH}VfM^Z0uAmt8#+6LVe5Ik>Igv3(^N+T zdoEBN4eYr{bu_ByRjQ-%p8tZ*03cwvyXb1WH>20Hax>2skl-XAx_ZaPoV|lyDXu~6 zIp@l**Q00j+QgxJPIWGn7ggsXSgkro@-@{N^);&R1^q4Pj2i2%Q=NwtFbtk z;Jh591XtDw)fuU<>RjFvROh1EPj$}SgQ2sBjN}Z}8R?@{=PI73I=6I~Tu)wx=4fKG?p0&Y`%4D@@TbBMTFABN88WJ8`-eJu3b@U9li8se_E{x?jD z%s0#M))N^hg19Y-c96Mki+fko5JV-3tnWaut%hlVI98OD*}mRUhPeAJ`5k5Y<>E~e z)?&!hSc5QIoAK5&-EGA?jzq*DlE00Gj(x}F7?PriUQv_kH5$>&z9A}9Bf$I>iZKLB19ar{W z`aDB9LC!x%HVgSV-ezM{3>_EiV!=7@InK~=wZi48Q$xpf3b*IGuAHIc%D2O3ruX<- zM;f9F31JrCd9mtgxBoYXhS%Q-Uy<96n{R&HEoz3 zh+~i|Z}eDdD4M`s$PkZdT3IS{2;RjbVF-66H-YH4{A<@AL$;tnn0aUcix`s0znNuL z;Ua7QX-F2M#$!jQAK{0@5Cj4|cpH6)V|W*znvGKOS|?qr!u@q#fKL9TyN 
zbwYhr?%t0yB-16#s854(#x^8VQAl^6;16)<&X7!tFt=jv@OorOruZ(FNrg8ush?v1 zr$VY;sLGXYObr>i=yDVqa0K2(W<`cttWwC=?sBA|lC32Be~;?|<7bG)Hwr81Zg5_| ze}3UeLsr;8s{c&g3k_Yd{1~`?hv6?&*cd~WsuE^1Vo1i2Oq(z(&vuK}kc=0DJo`}sbF|C zHY8Ig%xWA}I2R1bbdkx-_{1@uDcCwH9-y~(u;;UkAU(^D1@}lpHldZwfVsK`4PnHkTT(*uQ(c3G zN;Z(}_pKff4Cyof;rsBZTfc?6mJCU~@-PZzKI@iJzz{?miGHu@s2j3K{NH4nbeIgu zGzxRyOKv_Gk}2>WVIf~5Ye=eEn8D||sbWZ`U6=#VC{7|nGNo~P`v{HW95N)+Butj} z)DVC5h^6$X6w(>~ks=G#2(v=_YDlI-m=o`FJu)Oywvyf+R4;~PnuV#?_$a#TPwX)k z`k>IcGi0G!Vcyltj3Jpn$Yg4G0)`;UOX+O5hR2Xa62iQ#;SrSU52&KYS?B@X01R2E zPMF2oAwx1bFMy)d(N znH!Rcma)wHy37s9RFcUYs>|FEtGrc;Y}8fmnZhz9Ptn^254koOl4%fT0RFIo9Wo@7 z|1`_&aj#pkhCB&Xkr|MyE7lOjxnkRdTy~fCf0-^_W#iA#X}xZshIHB}ObnyJh8U75 zC}$ZDqru1;lBpJEEy72}kW70y&%ga9*SXdkvR|dol39m^$%+igGzoK=-fbF^DO|-e zC+H$GBvT{I?>dPL!FZ7lp;qZEGGw8$=jd*Zo>dIVGz)W(-k}(hiB+)7ExMl=lBpG@ zw=NVz{0#`MNrcRhOx|juR{dEb| zfr_}N8M08ti*(1Wn2aHr7Gaj_nl&U-T*)%$YvT;b)C;q}uFU<LbG*g z8j`6L=7W%HoFSQ3Vg9KXZ9_68FVowbx-t#PGzc>V!-`EB|Ml1@?$N_)6zV_vZg+S$ z#50ajSWe^y^u}^xgALhsE+-+cz$zkXsARl~UY}d*=D(ry>NN^^38oSkhoK`2USS{F z&>UWk4P{9QvsxFZp)4t3uGa->NT&2vdRwRqR1xD3s!6D)b%h#|DSVA>UxkX;1Vh)n z8gl*a$6<<1H)N>}GMU}6`7s=Z*!;?>S>#DPkfJ*v{^|h=zeuN9sB^z@Wemy0UT2}- z_348lnOZXa)9_4+UJUVU6i1m(;Xe4eljN{508Aw8XOyv z*+cyrDp@b&)9Thx$>^Ib>D`KlGid<|W>vi0=& z3F_;~{a+NIBb$Z1Lk|>&Zi>g=MZp34>ufVbQHHHn$Pab487kRHvfqO^_3*KXVSxYj zru;opPvMfuUKn!p6J!QV(9t(kk45z)8+7Olm8=u8m!6jlmFyDoYMC17zf;J$wfH{$ z&QQOGO122u9|7QeF;ud6153Vy0FX3PvYuq-vpR7M+5hMVWFE#JWU`ncx?d^8iC9St zj3J0tA=durDl!C7(m+>7=|nOF(I7-;nR8_b!pr}VMefykVhE;6i1#>2P`e?BHX;7P zlLR))5JY?$Y6v6wuR{eNk^M-6YDgBds#?fq4XUA%?LrPy zzlKVdHqz~ml`cw#N;V0({6Rf`ELZ=^7JiJPnOm_2I3$MXNR1G0*SfwNg6I(9EFBU< z5M`gx)p#8eLlDhEc$JSkUxr{}pR&kQ9TGzjwL;YBkQjpK6k@pVTp5BWZ=$PD^#oxE zB0-{GN&))M!AdD`$iXW54Ag)koWpn%Yskpp@Kh({lbEc0Y-dOkdrp^-w|(GB8j?iG ziqGlx0xfCC{f9+cgxyD3LnVv9V9~R*MTSb&3pqU%%##1z;96v;WF<(i*MNJK zH6)Anwo1`g-f~3^l`Q#^e#4jt-0BULY!Grf`oaJjl0--IzhcSX;g%(T_f%1~O4zqG 
z(~u-u)F$KzOc)k5)ROV9>GxR;gQ1pe6!LD3t|3XBeGl!KxE(h>I4}haQ6k>P5}R<0OFGS58Byvi{mhcK^(gg1fFiVQ;`lvZx`8qAjssS#&6t7)e8t7|B{8 zkHA=wG*q%v$SU-Lq@j}K-d1Vv3db%}R5n4fU)FDK1PobY(QjY||B6!sLt_{S-Z(mt zn1JTi3BThn_o1&L_?6cP-z9wat+@ZC11N%d!JAfIE(R4Hte^#7PRYSHO!6(l_s7>N z^8FdZyx^@YpMxhbzq5QJc$POz^7X>c`UTg2Hel8-4j%}ng$jOP52D-X;2m{f_&{(l z`4e$C75=R)E;feI6ig2puvXz;LI>DD!z5qwANud+ed8Q-Q;eoz76r1=fCe#m1)rPK zfuZ9^gU|1zgA%R4Fj>Az_-^=2l@TzE!h0Z^gYs>{d&f+5U+!fX&4`Y62Jt^w!90v1 zPe_L3=ZS+x;cx!geV%NXV}1>fOIMme($lMQGRz7h@KmB%pIfWj`Ge+Dl{1888Fte^%4nB~}-$Qvg4 z4&lS!yTxOete|W=%b$h^Y;2%mvV8M)e*QCfA$mvy!=yp%FFJ@!aUnKL_OMp?k2S=G z$p&-^|C~nHFzLU1hsO%~X^0Jz28kWyOEJcr1BPsXnS!f!GH?o3l7-Oz0!(;s7?#`3M&K*M6k^S|y^i@`D4LPIqG-!A-Q9RkB-1*HY7;5;1y!({m; z;jhpkFm$}fov843I=BMMj6-0U>|u@Y2^|8%{GC1Q5dKx|p<%KCWp~j3aP6R>@Sbd? z&0>(>?uNjS4RFgWb|)PS(+C(c0*j{+c|3n)1j-9p!5$g` z!(;;z!oQ~xFibX}=q{FjTq9tZY(SmxX&M1T@ECt-K$jStfgW-Q43ia9EN2A|VvNZf zCM#$W{!vsw-Z04*-_7#<@b#m-ni}R$;rUA%P%j2&^>hvl^QYKO7md+D#dfz58IpIi zyi)k0W_K|%1V6^{t-^naZ<3aaonpSX)1c%YRU+d0dt#h?R|j6F0=&VhE}H|Q)kOpbBs{j6Xgo#lo}zDf8e@wAL5 zB8M3N;`Ij_k z7K5pn1B`%Sl8+VBL5Xg~hU8rXYlZ(wua<^MzEk+AdaWVKU%?~|${%0_ll59)2wtxR z3E>O10ftGw=s}iWp!>Ojm;usP&IdgDdP{Q(4b%>`<#rdBb0t^^J7}Sfw3>{-b@)%?A(T62MIz*Ed zlYFJ{Q#8beN&l_Fr)q>#Jyl>3a21sNn-!d{Av8?#4Z>fb5ilh0I*|Ve%WuK086#kr zyfv#5ejkm1XG+qbO$-j!2pA^s-Qsap&)$Cj`t7{=Z5D&;bP51xL;)sySpEbn=&cS6lRZcX|2}w50mCF; z^d!qia8u1WV91+meLNui8Hj*K2ULj3yYDVB$kzrKChxv0%2+{-Ho%ZPD!|=&i}34l zch0+e!|2Yt^WvvizQ;HE`~iv>Chwl>#o#f7h&?b&^3kWoL8B|zLlM^+-N!10FVOOa zNxoJ1Cr`%y!wQ}}*&#ME-O5XzVFgQcl^K$E*O~_5PtZAHnB?=zS$-}iF(Yf3^j{_X zf!u%CfCCj_pj&;L7+j1B=)f?^$Dc(7nQKpV2PQ*2g=-`+xDU2q#@;ZwK^3eb-`iE_ z1ytb{tUF+}7;MsqONPk}s$KZ42n}0qnB0I$pJN4o;qy&4z%V&>O~S8`vBCUz3dU{c zqnE-8ae&V%#ergS@M?s=UI*7O$#;++U?DN=6KHrB?}n5;53>ISrS5*tGX?C?_)tv9 zZFn1+r$j@R#3R5L=ygvlDsqPEwN}XSc$b@`AxZpgdZ&;h)T?2iKvUfw{QUVv`W^eQ zD{4r#Zy=HovTv~?4M~o~Q@~1=JOOdw^AW?;KyeeuIw5z&TuDQgoQ_Y*guMDrtUtCF ziXd6EsNyC1{ZilHHzc_OOIygNv{FM>3bOcRlpOd%gL7xtnM#2zJKwE{l7w>G4FvP7?Nflk~rGz0K8aD{>5yp=t6ukf*8t1>l 
zA}1?jn2qP`B+^6b%VDHVtXJ*nE-!{?Yy4GOpPc1Zq{B!RGxvz1M(JoHlVc)yn~m3| z$g`ePpl4H}_{E=K5M3;IjsB*!xqqkNFcN2G&!||@4!7@OX0zO3L|Vdry>t+Eqt90TCwJ7YWV*(tr63&v8rkM^+)~&)Rob}+|dUi)XY7o-G8)Cct}Rs z8_Yd(v&%hmv&(Ik-1@IvZv9s-H@1eh*KBmT*KBmTwUT=PUVNe71KxGHoszpAA7L_b|G!$=D=_lWZESlybeMSA20tzg3z|JaQ1 zAg@w#`>10>l*neWJpKw`Qnu18eG0n9`W;51>sUh`O=wfXWcsu3IU9$OO0kKWO@r8E z;bKNVS#QeyLZo$fNBM8V(U@v&sUhsMDN+35kF2RGnx_8;^OXJ^;)_aru_f`o~;*3-H%uWv-=9Zj8{MR02?lxeZV)na!_~S4Vdmm=W>08cZ`WI^BFj6ZvQB)~Y zJRvp*E_1v5fy-pO$M?MK=BQ``9d#Y*j-U=BFEBHRj?@WT{D!;Pa2VNSY?rVfV%y|X zD~FMy4^US7!+DP@I&;$Z-Gb#qyM3`#_?>D0$F4=}&c^@wV$>W){>#k&nW)7L$QxOs zoi{|8Y!!h>J{39(byX<&Py@Tp+&?M%g=qwpTV09%T!vz*EFYt<&^Y(8KnJC+c3HdyY2 z=Y#Ey!|fN&LjbQk&_xl`=^=G7(z$q)(e}8`J4jQ zVdN)a(e&t7Ec=IE<{U;IWhOgT3B~J`{xGIJ3pU9{o{cCMj36=C( z?UTdE>&7++yIfg^kzb9?Pq1tsc8Rwed%He8!Ca16mC)5%*kR-|VbQEMVJ|{qPN-W^ z78~dCZ?tTjO8VK#E_3Xg#x@FjlCoET#p~XYZAKS-%fdOB$y`AWBTqAv&8ikQU5CkG zu`KR^N~!ew6WbXFu`kvr5vR^>ku~4>ks;y!!be0VjqNdFmstx1bN1S8)wlV6b8A?z%i zvB}O8j6CqYvSn1#k9*u*kWLkhyv58uc!(3tzmd&_VAVD=ci^!fI!C7}^4TFTH<24R zsmmCZ{)5V#-DpJ^AzqbcR7+*xDaFocfnp#fFOUsiKbl!5mGoOzxaK_|7`g8UWG0)( z`e)+yzMR=Zd*RT=ZG%rR2&v;R)@t@2HhTWhDC}y?W)bLUhL-M?8k5|Qrv0RZg$Zv z@RvRwdk$CNp@NZzk(u4AcgD_%g0H@bRMK)>88#SNzEAguX(zd%z{!jFC=SJ7}6+}E$iZi!o zl;5wzAIyg;vHL?mk4?u5g4h^7VrGx-QQCw!AKI9)*PiMvXQThO5#bS8@t>LdTDiMQ zy+ib;jR+4txRFHq)p-4m4ZK4z@{44K;53R|MK59nzrZpa)D_aZ%wIDiJlv~h?!c)! 
z=VvIgH32O8FUfZ61{aZbDgHXvEN4KSq8d8L2B5#(i`r4H-%xE;Sdq+|WV+|GKpt*3 z4i>|HcrHn=!xd?`jZBaH{2&c6-6Dm*B4cccKEtaj_3s}U9(hO&a|hn?g=){a@f9qiieP)-HDe+^}47=QlHV&;{SQ+{KD;8kKn;{S7pi zc8toPM=_7t{&K~gc9mvVOJ!X3D0d4|J<5ML8?&^Nx#>}C)jUT!j6Bdm-{^2TmDEx< z=V~MuHGXJHLg+zFGL1Sc_maN z{twF>g&Tns}LJRC+=GBb!-P)Vi# zwFo3Pom&*4BFIghay~^pt+X2Y)2Ghr4@E>ve^5=~AM}W+R6=EFA*KaAE^cw?Y_S5^ zq#ChB?G04YyKseIpB+Zpn5kP4&u966(x_IA97Y~tW{;F8ieW6^s!FUjeC5t14kMp2 zlZ|QZG7+K5AeOb6s=u4D#82Q}_7j(gHToyi;U>~xk z_{+zHM@_6|Zu-*`+{ieLe8o(jW!kBXy#xQ*nPu+qo5zH+GfKBJvoBuNdY*wG+AA(%3@Gsbo=!TOex3+Y}0KT7cKITbkb|4Rj*~p8u3f+h!cMY}S zhi`HAe^;b^UP>UFjX<-k1?~f}Z&1Wh8kNHPC%@bn&0xpv@U#GZ>IsV09 zAadJH=Dvv9xIUxZP0diE9}A=LbwaGdBBG;jzxS8z6&{t*CAp)p88LT!lm9Goy^5RJ z#$WNIjJba*?u;<G#oRzC*UU=g06Xhfc)5v{AKjJ-&Yze}F*OU9u&?UI{3_xkT4H>>mx+U|DB^*4+Q56fs0 zB6+O!$Bl=R!aHf6Jj42@FtlP6aHHs*FPH+PgCw&HBd{vu#1x~Qo}@#`7WZg`-m9~WkSsKe9^%~|M`KG(9ZeCtH>12zrPjTO^15a zC}ZOPTFOkSBbS>zbog^7pi^DU9hW>~_|HzjmQfL7Zt_GSxqVwC_x=0bl78R+azc3Y zA;tH=H2r@*?OMgm)NYs|QT&e&CW5%^)>9dmJWKcoPefhOdyzYKD~@OU&ZN*8{>2k9 z1eKB-#>I%!b)QZCgUoG}+~jG&U(ei<`{*HgI`BJ@o7Et>yPX64ODAH60YPb?v~x@xBEX# z!sgq^+_CfYDm`zjKVULiQLuuQU5AfuS(%{!>ZI`StZE?^;8v1^pd;FaNZ_JDLQn_~ zr)pGi3@YFjVF)#lm&yBb+nR8@&aI}2=Ba}aL42!w2NS&V?+^uaVKH*jzdpzveH=z= zB=cIFnYhb2jC4rmb`Jq#?smaQ*#j*90agPu9Y&g&nfV?bJaB?GDWbO6gCqvjIbyUT z+EYuS-($$&Rn+h%JRt5Qla-uUxVz@LMDAhAA7ZJe&`y>z3}F9GkjcvX#4(0A&*Icw zLaG@pW+6jHEm9}cx@H$zL#e7usFk1UH&Z~73!og;D;}oDr}PC&Lpfr$2=$y^CJh6D zY&^jHH%sL|HCy6+(}oH&iWeX z&TjB$C@pOlD)$>l846We%2G$+LsOLU0;<5ld@L{F=Hc3l18=}ksKUottOze-u_uN? 
z)d=+(4x(&_p->${9jOitg(`aj4!x|K+g&k3;hM>1twGq>4#NyKG4>>>C}Kj&P^em= zmT0&P#bGC@A!$k(W(K_Z*kW+@%f|V(j3$HecY9p>hL1+K1p@`xw8mi>ibbWi$`HnA zN1iN@qNnKN=~LW{HOv+Z{6Z~tV)2Sjh*vySMu~}QNSByg(9=0FT>rzi`)<2yXy1}^ z@a+9*#$aFEVza4+($N;79>;ApDMJZQ@iQ#dfl10?a)=Y7aP{Pd?D4oOW|+zIp@t_~ z4vYWux)Yn7hm|Vij+32hLnT{D4(Xx0p`r85Z>B+1N}i?3>$tJwZfLmv$!+P;2YQ8L z=yHQte1XZr>1QZGx!naNe-(ZFfSr&N!w}UV)p zN?xVcLojdoJ@O|g_CTfh*G-@@$79E5j}2>ZWqpmrmcQIQQ1k*|s>r0TN831-4kK-1 z{tLDgZV865KqOub>wasnm2=G)R^Z8eBboFKD8jLH7%6z2)&78k0?iGhxUnPQWqyNG z0vW?N7M2t_3;P5K!wNiQeuKV#|Jn7;5N&D_;*2L8VF;pd4NH8DU7pb}1W_ZzN)O#2 zvr-XEhbvO1MHE4l)zDYWEDiC|l@PC~r6Eeh*0RKR+FnBtwL%>71kQh~9f}~BPGRoC zh>|b_QT`@$tmYNTDqzr|s7b@MWbzHh) zFoyr>6nz`T(uZKjVlju2O38eA57#7zkygo!A}HMS97amsVe!v&8al*(pkn67IIA(P z4kP(>JpX02sHq_zLPV2VvWelzZH^V6^v4-Dh$$^3+?2Cx2xf!SCa zWYTBiVnN1Xq~P5=+Kc^mm;3^mqI%$emszxpUEDJ}|ygQQj29 zE{dK%<(ThEId4;Su*#=(|Tqe z@QnY^Vc}uZVjF0>4m%YGZ=)hR5E#v0QePmK2ZTti*!~?~t4a@k(Pifp`S_uvtshkJG(5l*`~4^$?mW${M3n5x|{#N9dn z^-*X>TA=4Bb|cQ9)zdb7iFV_XPyJPS;o%2FKSFNiOe{*7iHf{eB9W3U@vfwvvlq4q zMs}a?RzwikR%V6=WBPG_A3V{Y5J9I(8ksu=xm+k?CiL{R1aT+8Va!b?;sO zU|zpN(W#eQr|!W%k^eDkyAfLmH+)0w6i<)6`OGIf%1%|$HXOtA0!Hh>kNvONh&IW6 zrOkb;Slj0Jo`J_9@lWV?4xX%Y6a@W8jtuXa)kq>d4^Lp&x-*IXomfbH3qB>$x!iq3 z-09yl0}o-UnVFMZi~gD!;b9lFGk0LazOK=RGJUhL^*4dlV3tx3j~wUrGeb_Nydil} zoJj{`fMb~%8H`1f7%$QfsaN=s!_k7m&*));Mt;LY|FXlw!~51q?ibqFFZ_pS+98>( zI>%bS_1DAHEBhP{(oe$agyZQjQpZefW3w2{RRf2Su3ZdbU%+_aF=}84XMq5^TR~P` z)r#ejYAHBY?C@(et{1zQc2dU&OYVD_rcCq;XQE%_&CH!z>~d%MRWtG7Ng(rOHy3X#i!L4Mq!Chjx4l9A|(U@99 zvn=^4X<0!%r{~?SES7MxT` z?(*l{MP&JN{v6~U+{WC2KjO_?Ix&PQ4KDl+tj0G^J*SVJ5c)Lv8)k(E;lC? z{=T~swZBhlSXMy`3%rcJaI6LW4`*TBRts?+wpFe(LI1|th=02fPiToJ68^ttqv56B z(@%ls{^oM~Hc9S!EP8HOANxF1yBK9lqie)A7ayRp6}f)! 
zD8#>mnc?qrRK9bgl2!HtbDK0*(7$^o{<5H1i0^dQ{BELO04LL8tu+4;E4mLn8&<44 zckiD*f3@GCBf9_Vl~ZosHxQ`zu~XEFaqeh$_{sIZISR|albJc)Fpb!cZsYvEMaD{I7!jfnljP<8Lsr=GqNOP`&6Rxt7dGkdTU);Z@`^e1HG zOs{im;Fmgo4Jyc}l-z_~N49^&%-t97ViZM9qJp|X{w9trA@~kOvJZ{2% zrr#IwT$Qu*b;Vd9O7?G_pj6z2r%eS$lhnqo?hKM%$=tUC3e)+Rw4f$vd zy)60I$a%aSI=3WVx&!z}lhnC4 zbW@$nxI1)=rTp|%ovr8toyam-s?*N?VI^0k{nJ(M52Im&9X}fZjTCgDfx$6oDU}|% zp!ox)uFEf-8=f>Yww)I5VVP4&s|V};k8tI7|EIa(p7Y-E#*a)KYSIRh>_-CwpytUQ7|*2JLnKIJthil;s=Tb*A-NHm@639 zF(Z}!(YPiiCekt56Sz9;zkYl;+^rnkmgk$;o1*q*AH2%?PFv@@HXyP4T|hE+}_ zm35*!Es~3;pwWDD!OH*>d|Rwh6uiD(c#9Q6ZkZ%lH#{UH!LESm+Gz$aCsp3cLZt9V z01dk)3T`9AJ|o7ZO!S}`CldDl8ethJ{FnK!nCRZEJ2a9CK1aUjxLM4r%cQY$`C(?Un(;Pqi;F=TTU`g>mjYcYIp#RRg z>CP4Y@%cFevWg3nnHAJ?gZ@)F#ZfWR>Y$7 z;DgF2H)W^8KUM^Oj}fatU@zct5N|G)5G{Lf0%k+7O+@df56qZ*sPJ>~Iaq^|8NtM% zGJtc11I8by@`JswU92!MqAOsP&VS6wf9yFDf*~ztX7EBxF;NxMVZ6@KXC@Y!CI&nnn}BX$OCrX*o zlNq!KO~GJGj$5}br(SY+BsH6aO??UEp0W6_hKXw-J#yU5*qP8n`gHFs=z~VP^WnMByFyOC0Qx@Y4X5)$ z$iGi;`bE&sQu~XcU-*Xexrq7uJN;7Vl|MUuv4_O8L2e|umcNL(gZu*Mr%rSEmq8zh zk>iD%>+t}5nnnEt$n+eSe-iYb-JE_h^zYGo+MEtajB@$sLHEWweJS*d4h)8lj}AI;Twe4YfsJ} zu>|rbJ2)yh8lC5Lnnm^E6nz4^96_+<#F@Ze8>tb67i3YeZ$+OZ#cfrTX@STKR6$6){#}NgZab#K+4EF`)%b%PDxv5a)F@eS zwv9@!c7{WK;LCpsS@5NK@bw0By6sp4UmaA(acD7@G0x~KcVxTNaN2S+AoAxBT%evt ziG4XCC{yqEUpXs0KGgZN|NU9veZ0$7x~*p=USr}oe~O3j#LsZFCjRn`+gW+Ye(h|Bm!cdGL+1iH|BfH$9vWSM2K7d|-QRI` z*xP?HE^(_65V_>AUPP#Tw~R>`YG%lE!O5a=mH)zgZ1(AQyQggI^|BVHbMrraxziaf zizY9s7E#V@iz=^17Fk|&XRdTnWNQaQuO&dmTn>LhO|2CXspx=6*8`5;>p&8ih*>f&K8)N>Ta2sb{ zWo)duZ`gSEg>GZc|0S;Dj^`lw4@5fjc=Wb&`1?V(DKH4SO@kZ{37ZSK&}}|sLAN=< zm0&I!%@h9jbek(-=IxMB00F>uG-Dy zK%6*tGua2b%5D}Dv0LnBu`iBWFR>c)x*vKZ8CEauM=F?MbH9O!;4FB4$z#FUXgz;M z!L`_F@AF?~A@&~YtB8r6UBQvOOfsxegt2L5j z7p&4^X7sYAUw`EdXgsr6w3>?CE#X{TFS)p^HHmT@t_`dGhtI3*X}yqiQkQ#W(6XVjdHiALF)1E=1qQshJWWMHf<%qwek%&DChb)E z_hjqof#=-(SCMsNx7CN9A6^{f?jQ`2zHu2SdY!(Z#9#L}To4{QxP}?2T>r>S2%C$G z*c*QE!f;k7{)RvB!tfDZJB~%HCk21wzy}>?;*}MKk;{?S4}2AYq`6TseJnn{CO;j* 
zpU4+3J;Nmf!wz;^!4XK)-`98oOM9MhnO=9Y<4=cVStsR`Ak1SAYjZriZ)Z ztQ@16o{i@;bU0u6vaK$CuBPw(&RtXbVWc^1>+tQPyw>o9%JTJiRLS%?lE%hywtkYg zKn)H%xM2&*rlf;P5bH$m(D}|Vz6V1%ZO-!O*qig0WUyoP{Us?%{vY{xE>)T z&pRAe@SXBJKJ(`?91qCzPK5kB;N3!S<^P5`!E_J)5eywp8to46yk4-tHnf-qyoiy1 z5iMqVq^4*8;L-;oP5UY8kdHrTf2yW=t0fIa#8{peLZ-jfn}w56zx41Sy$iWm4L;G= z4|s)SfeQ41*M43ZSz+1~cd+5rQ7XUzi2NuB=g{4_KruZULi^V+Czu|i=@{k&(_W5d z+z@dY2Wh%K*Ah1t)6y0ZsD;krlp_rYA3?S$+{dkYoA9nl43$ zk@n=ylm_P_WIUj7J7c;JmL1bvTATy(hq*Mzl^5A2M1(w#Z?q3)YVD_Av5^X$N%8K{uy@h~Po8#kD}IR1Yli;NzqkOqe_iS$qRy7V!cF46{Et?6`p z3dP=YVlhImA|gz)6C8pLoX(l1Px_mWdwZsN$mx!BGj>*w8GEV07&L%sKD1$h+fX6X zOEq1BNyOgrWJLaEO^?&`$p<=qA5HIlxJ&PkG&?u|{#kySM&Q2_@nyfvIFvjL%J*|m z=jUnq5`>KWRhqsYX>L+575jRGm}#B`*`r~okm3?c^4jv3} z3FkuCz_l~_1SI1GrjfDkEcXKC@tVdBNZ0$gj_*7%O~7ez1262n2I6R0y20;rS$O6M z#<&y0ADpVX#HJe(u!()(FTO0CJsbzPst;IJ^Xr*71hsNb`)BVQF!>IUQmKMO?O zqjD456bz2?|F|stul<`oB!`FFEux%(gNi74)Y>Hq?&b?N(q=TyBUDP$RzI^iJUcY@ zsw>aW&O7JI>?N0FFTUckt1mk5{Ot3WE?#uef{T`9Uvd7GS1nqSz2w@(=dbQ|Wq1YZ z^!+8_>|S-B(-%%iO`rR3E(ss+U5!nI`-44Bvqxt3xa>3B&arRjV%Uf?zHoRQJ}GCs z?B-`7mMYWV;y#mTkI(ecS>RT+e-yWinAW29<~HmuQc`?!o56 z^a9*xa=);rUiQSR-&A)Ax2ISmk8*ae>ZN@f9;~q3Ouh8GCtTPp?D5tEnE5pL1rL;n zD{*My&iFG55-afOIdM6b9C0+(I`L9eOq}{RhhJ#=8C1;t5B1WYY597omM37`tZsrK zHypdGij=tYVqCJBe+dc_4?+dR&o!Nf9x`o@$*#~l_V3YP<}X1-ygpnG|HLaXl-$T# zv59d5dmlsPQMp!c;d6ESJqc-UQZ+Cjj)!C7i<&-I`9oVZ9~E#(r6XpcMsaI0FQ)|GiO=_}uCVOT&kE^Om<_*@W=4De^G2Zh4_R ze66Rbu~ew9i`tO}wVrB9bZT$t&7#L|?F+q4^x3Wbpm&Krai4+E@B9(=sd-Cp*b|9| zBr*4s=3z)YBZ;l4Q$z?M=!l?$rz;Gl~nf8Pkp!-QlCre#0@>hj7Ms-q<)Ol zUpx`1KbYDhZ^67ZZ%;w;_RUPD{p-GihV75!1DjVLwJiKs(2KSspBqC=6ntC}7v((M zc#e*kXszuoH3^}-_t-25BO9{s6)!_YTB3PkQ>^83{{d*Q_? 
z*|_)3zT~Pamt-$I=j!vb&&j^{(#x*7beBzcb^FcX&7mGVCvnf5xq8#B_(~M-*j2LC zA_w1v|7MU^JGt%-S?iu?F}){rmZ5(7OHSwhJQ(@1Z$f|dIky!uzpTpXEOYG(Zs(%@ zZM8ekQ(yVC=$<^Oob`&^HR$XlRKT5q`pawFS&jOMtDMgLHv46F2BW?a6>}q?e%9+w z=T_-G;|>|r7rg2Y1k~$ZbUL@ezt-T178f}a+fV@)I`!GB-36Tbz~`OL?d!VNg0kqT zZ$=Nez^QLU2dJ~*r&R`J-BQ2mji9Vq>V2PeI=8#0(1C$K`C70$D}o^&ZAdht2Ryh@ zzq#D$+zzLpVxGsSKaY^|v`IaLA)w9(l%WGW7E_;%A>h3{^~tcO&h2jwhJg2b)O(@> z9&h%TScL*3Rev5mpw9g<14EFldO3P9O7*`mOw<{@h3LR&)i@A%-AV_4g48>fC!z!WfTJeJ+Mzyy}zD z0qPu*J23=%tL~u#6I34p|2UXBqgM1_A_(rck7EcXsa}E}OjiA9R7{V+5r>K-T0T2wF%NPQ1D;yIoA zX$TQ@PO4lC0q5!Ls_5X?}$9ucL^NqGLt_*;|1 zHSrjRV5aInp$F7CIS)mFBUJwpJvdVJdW4iZC+e#hf?2AcjR?(F{d^1o^=!!Z=)h5` z{}V%SwCEWBap=J@N^C#@>YUjhq6c$S-v<%;yXqS-iKuhNKZjvDR`tQ?z;UYgK?kUF z_Sazu=Bj=LI>0kO^RqAn^H9Ge4nPm)LFW=#gdsRV^%@MpiK@rY1L|Bl+b{$tss0x_ z@DJ6m#}H8GD*6-^oUHn448bX?htYvkRS(s?y!w&l;qvaG{73x1-ya^_J^qNF(d(<# z7vCQ~{{Izs=D}4}*&4t1kT6&ZK?Ip((nte>QBax|3izScRm=VDefGHK zd(X}#S}yWZ_ih9Gc6+Ap0L2#qaSjhBX04u@{BW4Q&-|i<^XPZxJBRX4>iR9_Q_s=9 zEz9~;q&;J<;Sa%m*Bf3!pR4qj!QUF?8y&~z`IXR;v#tFB`X?_kJOw-?H-^K=b}Ic5 zfZWRfs0&yV?Sjs4HO#LwMeZGMeeML0&bIb%Sm5d`YcI|`Teewy3V7lX!@a}drxC0AoqODLP`U@X`1|GLQ3cNq< zG+Y@R^%K_tpPz5-cY#-Kw6WVlPVmiN?ZHX#8`BNz_Q}hROM_4DwgGj(%Y4!%;0oU7 zY49S%p$+v1FGZ}v!@&*UkPevv{@mCenWz2>i5z%(tGM2@?nRR@>yP1FK^;YxQ0xb+O{p8{^+I?(`JheMW9uMN=mb)_we%5A0=IVAR{%H6v;Mb$7wt8CC%7(b z*E|iuWkQ!_Q!1N%fp*{wSFSGLd^V>AdV~MVv1DA51xnFA4E*u8Ht7iP2bqQ???;@w z6V(R`(!$x`Nv?e>9e92kbA3SY>O*#;K84>98 zqoBF&peBKp4jy9n|m{})}kcJko&?i_Z5Gf_aDARl@KP6$ge zC%PS<0pEpI>i`#^^K4F73i~OZRQ;u}i(NEwVo7chQr=so$ch~0s0=~g8X%`}+)c3t zc(BX6COFxZu{OAWwL^Bv=82lRu4t? 
zI>A=($G%fp#1?llUxCLtOJyC~eE~VHe$Mt&;FZp5SSnWpSvq!`LF zgvGS5%xO8wB3uI8(oZZ8*kqcumj!>}qLGyVVPoo_jHO{y^xe|p`lP_Kj!jD( zeZgP&A!WLK`~ZVODw}YbnpAfD1F=9@emfHdiR0F;;Uj6k8QE)6+4squYj~aO&3M{B zbA`jFDitemC6Z@++fTm_{3Z*le?Ih2AFGULwr@@z^UvN#xavIH^eOPL z1@8ZqWX^N@mMgvLyOt~c&>fU;!C_lWZnYU2u7!o4T7M0H?U|3|8p+yvOIXkvE1&%g8~<5L#!W>56|;Sm9e}I8#Sr?d3_J5%6d~>^MzY^2$r9p<%%Y}!8Kn#{{_S+ z4$0>ib-k$%{sWxT{4xLk5BE^_@SwGCLb2dgPsx=Xm2~YDeuD)x={9g@HW$a0F;QW~ zi@rFbIL>vwQL4V*Pya2r2bex#M1kZ3I%$A#5_YNGDX5h7!NMbbqY5zFSxkKtVDiuj zVSU&reZuCs0AA{+SGxAJTZ977X-`nEfCn87Z9p*`$TBw}1)){W3E_bYZ2{p2+%qaT zedtOT3smTbf{Jp$-X8H3v<_~x`GtFqHLRdD$a94XA(ats_xgC%)n%_}R@s$K31nSA zaZT`1KUp2{_XHAgPWXa5B1ODX9)uL}+M*xgPzrb&hf#k;zr{GWSpKg}^l@LH5%@1m zq)v*8&2z8e;Y2x2qzE}@f#K%hlP)Gj%Ul-I*p}ex?%e(ZT+2P9BIn~C!) zitLn18FvUn!8P3H4+H1Sx52_kd?JPB@9_M6TwSdKQh(s;t*coI;42texq$-4tSEE zUitrzU)cPiJ_r`W7PV950S`Ir7Jv`?!ooc`jW#4)&I6Kg4Hu0*I}CTF6HZ0ZH0OKZ zEIggYDsM=41=Qz@vikQMC3{uwa26?{DC!F@2cN>eHFyPhiks+4aCz_Z33#ceQ>($7 zSWI&&>9`MnpiQg;7dR(`dphUVqkjdHw^*n3Jm~I9C;XGUQY9v%-OnpA>FE<~0atXE zZUwh=>rled(0#rVmiL^yyTCQrkPf5d<#G85`F~Z8qC|Rd1boS@KzKH8SSQf;oxR;} zE5q5i-r9wqifJTjpaJgZg->`m z))#eu;FOw3+0kWWFIEfh@&3w`65O2&Uzch9g)g{8D1&N{9cvFGk5O6XilcPuE@ziM z`_*NzPOmQ((vYigXK1%i+7q1bvhNR`;hR&+bqLed=9F@^biL8%#$En(j_|>(&?Z$b zR)hB@T3`f}>#S2+)*jKQJso-ra}^!~HD{yZ#8_yTFQ5;gZ@Z-^UCZTQ>LdI(ny<0b zp?xm0Y{|b7=A;Mu#5&PsyaasEmFr{h?=!5A^1BZ`f(hsQ*p=WGuvGKtE0(|b!pi$n zJo;?~Z}v?H>lbpmYk+dVa-L}9fhYR{!oNc&)JM5tE4K*Y0UNHhcSH|#%AL@C-bXp( z%kCw2FKe|p`L(LyqH)uB@zo@=>%ors&H3f{Sap5t=lguRm}+jUn!RImhX#YF?u=DO z?rS(?bu+5psamwb?dN-4Pwj_TZDN(rKKd#x(rjFYo;5u*DpKyiSebQD0|)PsH$+8=nJTiQ$o>>VaPyPyZq> zUgF*6>=+!g8u)e$Hk|-mAsBQ5eI7%%f%Xdh^EUp(04?9KG5OvbgY3U$CGAbl4@bSn zqpQ`9@>fE72ek1)doxzFawPVS{mCu%#`?u3cIo7;`|PdQ;g!=MCe~=r8ROz`G~y`6f6}hUevy ztenHG)^gO_(N8Zd{t>HVc?%ob+pj>J=z^ zP}c^&L-sdOE}_&$R#9rhUwN7IAdz1$o9ZHam5)%?qFlnl+VQ89Vr@ruE-#+QH44f8 zA?j+s^njI}5ry84mcnXEP~QG(B({%}k^Tu2i)JKdNU7PKHxb!sv}=j{WJcjh{Mg8z_FpM49Rb`H22{ zo>Jb%&j>r($u<_XiPKO*vFb3gmXeiZV%2kc7^@${45ht|Stu6Q;Mi@{R|Gabm%BL< 
zWwFg>6>TwRai+`xYrj&;L{#Js<7Bw|Cfm{eJmbL2+|iMYFY%{tv5EFu$2B8N`z!g0 zHng=9_C#5=kyB{3SfAybI*W|8aP=Mbd@r2SyfsFeQV$syk;ucyRDhJMG1r*Hnp^i; zxrh@pO5j z3umBYDoa0X^Ve_@9HyVvTf3DzL9+iYD{aEDE9|6gxb&$7cxNA5_ixB zw^B857Ik^J43tb8dXbZ>Usp=W`W!4)|L%B3vAE53HqYH~QvEu!A&K<)t63WkCI1wj zX_^p-2DC8hM=3h`pi#X3zbSwFP(mR3wK2YK7<5ZZ_?a}I+HV=y-01oyHc9W)gkThH zqIuvcl!fUN+V;^_m^9ZW+6O#fw1qNGOF8fwn6M_2wf0Ez*|6(PMFLTFkPxG;%Y$HF znIb{8I_w8h{nsdaIb)&1eX8BVSSa3BO>6ve9$PmIvRg2LlR9NX)m~-;Q1)lOcpjUg z9;3bV8OpOlRP7*b>9iFh%~kCX<7P5W^!_!R@tcUK}5CI*dPN(@F0N(@rcjh+~s z7>MHae^)l{&RD4Ma~K!fyl#m>ar&uLf87v>Qy2&T zPi6S?PX4bqP#Z2J>w9*2=_0YpH5{+kqg|wkP!m!%pj1TE6iUrHbUgXT@EActRC^Yw z6LjR3NR0ZHiv*&0{oj?9=r1b#ygc)X3b*|d`?_H;cU|;nig(&U>=ni9DjN_kMZ#6u z{7+KbMO~kJG{m=AMh|vXzp9SBWV1L!AyRJ9RR-9A9H+pzV|RBQ?qa zlnIo=qT-N5AkK=bO`$kv#c2c+yA-G-KLNUZbTrsaDBF}2v#{93FXdcG{QYDepIJUA zs+#_j>@*@FEB$ox%~>-}^AC*Z8utXk#II3e`)Kg|lUymhKN|FE!X=xjkznZLNboG@ zRC=k5P^I!6rI*5@s?a5ZYif;1kXD8(NKmIVBjM&HqFIg3Bv-%xH~$$8_N7OIgN0}n zcm=c^N=iXEHKM^Bvbi{|BAS<7#9h2Qv!ddFN{?OqZ zR2u5NHyZST+OOyK4^*T-moy=UNL>8g`RBXzX)~wcvUkrW&#M%i%gkzD^7eNB4ZVTW AdjJ3c delta 821444 zcmb5X2YggT7dN~!_io8<%92JZ>?VPP-a}U|h}Z>tqgb#6^^r#(yCn3`ByfQVJt!qo z0|7QP2~9!|5HLtF0#XBnCMXE+|IFRpEaLaQzZXqr=A1KU&di*dIdf*_X55BcwqSZL zvzuQ3_ycA#nb^5;_PthfCjJdDjr*ldbO=TL|4e<<%mD*3O?o>v#XJCDQ{i6rgu+~U zfw`vM%KV}c3l93|^-tdI{hyEeerReAR2MIS!{XGJm}B%7mJiMIqUJ=k)aP0TsI8QA zx1MIL%UbJCTNkl5`rp=;mLMCq^Ps}kwmOWp)q4g#7OSWVQ+PYYY*s>I%?e|x%7Rq0 zMb*~_Md=5ES}=?5u-Da_+cT2g3bQ1cIp>xjv!Yt%zX+z(M3RmEy?C_&6*MP94LtXOOo#<(MaGkZiB3U!zqHWS#`Or7GDMEy}m zhj7Lf3ulZeiUP5$jGOda#{-tSF)=ZS9&yx)QmiJ2$>e~<+{RfLGn;t?(=EXb8zpix zgtYEnWJs~Mf6_YQudtH~VfaBvcj*;KBoipgwd zDA>*w#bQAT_-E%9h{T;dJ_fC&0M8l0EjHDnK9ri4rdI|}W{LW+kT&}Ikk}{9iV3Ay zoXW!%CCFk*2m`Ij%tAs!Ojes>wy+?FS+Ob10k|zpsTYE>P3CT@*`ZL~Z72)>p|HddZYE|SA&wAm)UD2xAdY%kZ1}A=cec+&g0UbLWU_It zCX*~G31bO1^HiIvKB?j#P!gk9&}#8d)d~&{uT$sg?x8)NQJ<|BjY#>%F<{)z~ zB|-08?JL-8|Ur&=%bs;?z1g8<-q=ugGX;u;S20 z&L8B|Tm%B=8Zk}GCG&Hg8T!EF==zY;ZH82v2|@`5nRz;Q89^J~36VA#TETREXiOu% 
zdb++mrjdF#(otWyDqS`8+c7C@q3(`t%?b-&j{S|Xp@r?^hci|v_8wE}>6HmZ{AQ&- zH?cRbsw}kD8o@&f#!PZHVJ5d-J2P~WKHa79u>%V$TxE=}9j32LTFf`k*WXU=#;?xO zmn1jgQ@_($o92Ahce?v-oZdL42LJoIuB9}O+IDQPQ{&Eh8tdOx?c}n$%-ZTQJv$|V zUtg-vP5CnN_VwXDf)G{Wf4o%Kvd&X1xDp+d;SO_g?aT%J+qzkNXp!zrZNldi=~`-Y z%i^MuPR*?^y%4SEv^47zQvcv1c7Y&P&l0ixhg~2_(2u0W@@2d98)>chj$Pt%DvK!8 z>OEqMD*1k(Q?uM7)`&y;jApO#n}hV*&D!$Y*Y(!TM`so+K%ZFeq5FL;YUD6LAq<_8 zt!ChNi6g_=PAfwfySYnE+JI zEe67zDT_gt{A$eDf#79`4#dK0#KBn*EXnF{DJwuFYWfgIsu6#zK@PuI71f9h zlq1|(Pv6kuSTQ}hoW=VycGRs@4sz2K%nrQm9>1O%0ULS zeyi|Yy9ySy;P0V61@`efyhs1FLvud!LSaNlD~npS$KNqNifw!JCJ$h|T+rWqK=AJ` z=(ivEG;%IU7Gm^MU1KO@>%$&Q4?j~fP7Zrb?FNnG+Q|}q_k*8Uw(Wt859m2*jrqt@ zf9ocvJ2d8Y;-|CZas510J=WlwE+GqU>nl2aMUB;5>&8z_mm~Pw8L~kq%#aQGmlnmA z=+4ejk()N6=8=KfXXoo}JIm}VJ9lGi#gc_APIvW>=BM%`i^@F7VnCNLHc$VyOI+t| zWzZ&L#Dt_nt{Sd4-4RlfNqL5zVRi^f$0SD6!>S|qwdWrFT$gnAqwanvJ?eNK$^2bAkpw>P5- zDVNa!zK%M)yYS;jz#Cmu@#yE#Q-%!tXJ?Q7duZW@j}@`VBQ!DC?vZR@VY?^Jveb3! zP)++im_?)$-dV(}Q0EFCdukAmEX=PKw2lV#g=y#+h3NA_sWmu=G@LGXRH{&kOJ zK0aHY)x*t~jnFsuNa7e!XAn%=tOq~Sf{oQX;dh1p$ul=1_Y|N}y^}_YWV&RXp80GG zc2qBXwk1EdK|lL!3x4{BUhBE)nyg%@+O9Btyq*nuHUxlT}HP=X6VE-lyJeqmzYDMwI&8oqKTFn0%uEm?v_ z2=#)YhutJ}*v7CE1ks>8> zwI1?PY|KQOCcRSy;i4t(`+fQYFD0@<{k50kGPmR-yMN%?+zGC0%zcGP!u>yDrtx=| z9XKlY7(x9^ZSH!m|9LT+R|WF214HzO21<4FW7xgJGC&c_P7@ZW*4Vpym8m#YrIidclD`;x(B1t);Bys~m{Vhl$ zO**YdzS5X~J5qn>l^1Dpo&HKU%kr66i5kn5==Yg&c050mW=G!=n15RT@YR@@NyN?H zFm{-C#BuHwea5RX{K8_r=+(r?A+$R9RgLgNM(Sr@P2*#J(j#7TM;?Ujdap!NM(B^c zmKOONA^fdK89m4JA+Ocp+ehdNUTemSkLkx=OXdZub){z=K6iv(zh^VPX0_h4XEH0- z2llMXZ)EE~^lZjX>xX-$@P$it)~gO*J5o>U1-hmBtG$xh5IwV3U4AZGpVzAy%h3<@ zN>PsA9XP&s3O|&s*X`X*xpH@){z~s8{>ut|K<_&I*OB_1-p%-#6?%E^7U?$@qjP*! 
z(@buctbWBJK>ZDGb2oRr0nF>}1`>PXhOYIAfE#Qhxn@Bh4Cn=d zX8Py4{{U=rC&>&qXQLhgx@tg2&H>anJTW0W>iW=2znvCr-?WBGJSw>yBWQH>RMEqg z>BZbGME}YPxUhDFyCJG?Rs;D1u8GXuQRX>5HekS^ z=m#WpFF}2?FETvk(kU0Q{9`(J`E|<%Ykd;o3H9A*z0Vtw5vxa|o^ihBBE%_u_!}+w z)Y1CdH)0chT8AmtJ1$#;cZ_Q+b3f@ayDZuw{qh@8Z59kc2>}GnTscHV1RJP^K9p-5 zb2kHnRuZ*tf&Rdou{C!Ug1cfA-Wv6JP$JLjAHEq^|3<*vOSoDRcb%YK4QeeRCr0SY z-c06YBlMr&>}R=|ivit8+FbdQX(Ts*F>75l=r4WU zTd__4-iy`@X#N*mqnP^%NpUd{B_Ubay18#MzcO5}-?w4pj(d1hY1c>S8Ng4@*2ngp z%Cnc~4d0H*%&UZ|_&de!wYA0))YprW`R==r#26PaK~$|6Tix50uql&n+fAlmw_&Bv z8i~o#;cO0lS38l7ra3aOwJ~dse%RRzT@N0rY+=bGH#{R#Gl7G{6wDkh+J0=^f)=@# zM7x8%hPN0m)wMhKG19*KIe-d96SoN^7?=h(ve5`M@322>*AV9Dg{nXd?We%pl5qbP zRK-vSpRn4w`=}O>(r{T}l%O@hhCRsvo(20%8QSP_J@1`&W3u(Y(a7A7$p*cw4S$vZUL!0gCIJD}d>${SEuc{hVtsQBlACNqPNq20@08 z&}YA!%y*8|x4qkpP1bL{8yh)yG^pYO$JVMNdfI!*eEKc@$@eDuwkFhdK701uZvDo4 zZZehQGHykAjp9Ip!@KqR{oG{lKG|=W&p1BU?@gcS8%Rs0@0;%@u&BjGlLhYb*N)I9 ze9$^PXPbBHVx_GM(?)O8kACopbs%kBHk#x(`e~1U@1lSAVSPSyy}tWHH@|jKzx-hW zFS(?L{bwlKpzr;%1^<0tHEB*uRX_8e$1Q)9f|W~hn!L9fJ@}(RmZ7_Ws!gk}WgbQ2 z^vib|^Uc*e(ZXT<4|B`zUBESy90pb6^rfHF=d&U1CkZ@jyMF7F;g$<~L6{~9a|0yV z_rGKJI4cU*ej3l%RK5JO^L)y3eaq+V_>Ze}_QiP1$}M2fP=7r$N?-iN(|rD9{q7gd z`9V36=udt**m8U)2;8!SVTQkgHq5%~tHyl%D!uzx-7MReN(y4J?5kV+ua)|eubc4E z9=+o0=9cLmAS30jwedG^DjN$%=mlRlwv-iM(u$(WTPi79Q&NNv=*Exj&|e+Ukr#X` z2J)~-eer-Mi0i)&XwE0^G}^LmW-GpasopcQn`OvSW>B{Kk z6s#JZ!pjcpzm87e*~j#PoVI%Un3eqWHT~k4JqQZdf4i5t@_!#MH^NCAa}TsB_3?O( zWniM>TJZbYwbO+KIWt(R01t^V%$H7b)>BNam4atzPU?=O<0?&ChQ%RC@)ziX$3Gr6 zZME0GM$^jdqQax&Yq8AbhekTZKzj`RH5UqIj6}?g<@X9N?J@=u^nMX!C z8<|Y)WHn`zUIWD6$hp+Ww*w)+5M=n#fL!|sGAa-<9T}RKOzl|(wN|AsyKK9mAe6Xr z!ex%|B}-+)Qh((EK575EZuo5Wm>6PgfAlJ$Tp_);`@m#bwYp zypMJj(UJm6+Ux+@IKA!E`+09Cdb5wdw4|DMoIdYg==a=1U%oAnK5AO{eUz2K=TGp0B^s3)y z=qP+>W``QJmyUsHwF#CPrc-k8YQ?69lUO{aLiP!@#LR*h_{lZ;je>{y&nxu@mv)Fg zw;wq|oFDk68gfp8aNN=e>+YMdaW);)7c6bc0Xn)gmH)k8w=5f7UoRW(Of$)Y7ffn2 zag74RKZ_$m!wT!wKXxDa z!G9&+P2{y(TFJUAB|$2G{HJIq0r8jP7wrgqkt=G~vQWlveT8rL*21*)6M#1v($`r2 
zpALBkhF423%gF`A&sEkZClE5($1?kX(FDuwbr<=#Qxixg`GKt_ndFt}AFYVwxm0w$ zy63zs<;);+{2o|{%yIO9%yB@L6YQk=Nce&S`ciQEUcy_3`5Wlx`xR zmBH2->!Blq*6UBKe4@k7Kmnvkn*$-T=(T~6nZA}-E+u(5m?Zg`y6WzI?Q!$I_NWSk z%}!=11tv=}$n@@MPio)_b2-~@Ar z#sQ*{L}-c^tkNGX>h2u_4L1(M=7RZ0jJ6*lYCG`DWyf&qkKv|8YE~Bl44R8YPsIEP zoPV^8^3`FbtV6KWT-j=W{wO=_vLWKKRgLQp++~aq<{vY+b_R@@i9S|WchVrx>#Yv0 zapT4isg|ZKq`_Bi4AGs1wF=)}y;^CqJbN6S2PTDUwol!ywUypv1SRWgMc$-?lJVN= z?82viY|K)ReTV&t*RKY{tG3If4JIG2_bfn>khNF!+3Pzc%)5gfRf4gXF~zweu$`_e z@k8zo8DAV7TTq1g#OrneK3X$L?37)FO^TI^7S@gj2Sme$&lJQTtP$IN()f zuKHT&!~eyJ6?b>gF0Ln92dCX^q$`eEy79)2)I%R^j7jjWRV67I&Ucb5zM1U-R|n7) z&f3_G6XChdsWGR)20PKWtBI(B5}Bw!zR}Jvt}lFM%WDc>lBE}ve9n)o)$4C-r0l&r zRDWgLdr|*50bnj13Xjywx3wewk=sShGg*`1YVfKG?caz5xQ*LCk-S;O_=;|>NR#n>-~4Za$BO$ z0<@BL6fMcD7kge}W~*G-va@vO?x*;yqk5m+AM-(n^aHya@w-QKbsNlhV!27gs$Qe48Hd+0@zCd%HP9zO zw8K$)!J%I)0|$Y?CJ9Clk{vPnNH^VmIF9G9)H@#bia9;{TcR`nYpIiDzQWN*-eFcc zIKo{6edp0cz2-3&bz0kFUt7jcL&qAfrj4>=MX8;c3+=y7XI4@k%92%4t8izV&1I$HQA4Nec?fBvN}J-v_TU(ko1Sw+B3XQxH4&@o#mz6P|pQe8Eu5?9WqvNd|#x#x)P zqjTeo9=4qmboi(u>qqdEg?tt6XZL8P4(8Drh zC5D4lj;C)|7B0Fl5CeX`-n6QlWzq~}N|Vm~4aRBd))_S59LWxCJFbXmBXR9Rx%@wp zYQyz-ed6W+QZ)2hs%5#n2BD9;ov1&1#p5GLz52V4VEAm*fP=uLzk01UmH72FjGX!U zyldUKXSQB-tqH$ATb>}?nk`59f3DM_Qn=xI5c4rPvjuacBALYAkWAX$kWBjC=*BO7 zuP?sQgfB6etne|h=q+wOW(d?*UvaChZvS(*k6->$sJ+3j;!nws{U!M|{0oQd-=j4) z=uQUozphHT)@i2MQ`gKYRHCxkK{X{$Dk@)YoT;zQBRUUc2w{BFLGi1Dz3ufm zMPS}LbHURimAt=3h57{Fc;kIrCU@T4iW!R(du$SuW=lzj}H`Fbm`7fA#c@q{v^tdOnDv z$iNEE|Dq`}ro!`0O^Qsf@C=P%53uN&2Ow0v>I)sJA8_t1?_VCU~Edte314!*DKlEiB`v#8q%I;TeG>rG;VHnVUO^&bce>ccy@BS9I5(4Y`G(dJdtiET%w|Tca|LL zbxiicMO1cX?p1aN z4pmRP*I2B=&z=y^_l8!X_^*1idbSHEupfbwPxS7?62-ROYy?U4Tpxy;GV^zejjyw2 z3BQt)nmZfCIkfB{c}|V|cywCi*6!{XVQ;Y89P?4go2-$A8mPm2m?~$Ik=jIh;tda^ zTzcViq*+bPVC8ZSI@S+!(E3^aIq1A`$?NhgNjCR=c1?;n3v{xcBq!hT#vGX| z=ScBZfA)rD7hOz~j&{#){n>FIUkb|xel%DtI}xKRl&VW{nOw|SCXRi=GI-f!@!0=Z z2gd<}&}DXx6XX8J5<~p}jeQE(+W)aOZ3AE#5*(7G8ao9}9%jYG<;~JI@V(+zrk~n5 z&eP~qmd2`di8Nr<^Ih`t-H0GAFX-9_Qz{(FMakT|>lSlDU 
zN)Qb{Y(%`7C1~M7jO=?scM6fw?4!b9HHx57slDVcLo<8fG)Ak-!Jsq94HLu9#p(qc zmVzH_^MZ$qnP0HzOd7k@)xgYP@G^`df2I&Gm_}-~>~f4##g7^`USKWdkiWNt()bOB1Fua!xFI%)#2hgK-@aI<4hx!;H^^TN$aN2IrOz8VB0oe5_bJxP_6rLF*9@ZtlpG#csj% z3M893NGr7R6|$hFM(PJcq$Wm2y-ZRzHd32##TfY-(SoID*ME|@hDJ&sA-ObwzXZ60 zJ0z~Yftz_);_5lHOse$+qdsX4EeNS`yJf9YjnuSJlC-WN%-u4Ht78=ZQ-#E(7)-~( zdjeswQ6Zj-mP=fcks6&XQ*NVWh8g9-)nLt};*BC|8>HWFmuzZzn-r(MV4CRA_MVhQ zCm8T~J7p@~NDZZ>ng?UCprTJ04Ik^!wiXT}c#I+75u==%MxHz05r@p+Xao8ED#;?s zK%TDc$d{u+lL0t+o9%lqpd?0v8xEX_oB zEr8DEg_Yc@&I`Ayq?>lT8U|m#qP>UARRKV=mvHmn)r6`SmV2ifrg&kM0Wj``72mC< zBin%loRB3tu7)96I!yFk&1we^DK;ifvsOCHld~F+n~Wx_CyLjy!F)o9Xk5hVvI*k- zb(s5+oL0n=Dcn%R5*w$mFB=`UaNneA;Ou>#m3!GMq@gwTvvFJx5l^jSF&3C; zpE`B1aX%J#z&_X+6{DD(y2n|Dl`L`@*`|uNkb;KV-*6i_SBqT-SeoTr6^6Yc`cJXE zL`z(dcq800;kxy!nqLmr>ph2`KmHZ(op+Q7hYOI&gARAJ*GmW(kDn!V6#nL2z z3N1DYgZdDBtx=fQgqV$MizpiEa~x9`V$d97Hs8nt%^~}+$jAfDA>of2#X@t+sZ0IV zLuTbF$pUIa1&zEWTLWrCnYJ72KGcS4{Ou3Y3V_;3k&ME;=EEhw`9QewOC%R)4T)7X zU8bNlx$sb+XsI!Zb_cgt-I+YtYdUyF%366%hm&}T0#c|9c14;#Rc3_B1S55Rl1zC` zhkQ~z`J|9)y~IdCV{Q+Y<|nj;gc!X~a)-A3bV%lA24_C!JE83hjTBQ4!}ow|U=Lo8 ziW0>~;Oe}77;_sSX-8OMZMX-VF6R6S%abc3#pH0pmeA<#kOZ#~Zymw96Og?zC3-w! 
zO(e%c*iokaE7CY?W6lr3wkfgQgD1m=zV5wAn}JrD6F16MN3&qps8!vN!h0>Mg?Q$H zs(URe9qS5GXcpW%^EL_g(ONe4y?~=hh{Fn_Z_q51IEq`O8mDK z(#a$CRAAu--E+Ue`+`m7m%p+9kY~5*3~Pw<8xeb)y-n$X$Bp#mQ!Ki+9rt$>To9#` zF0{O&WhuCei*AvRLnn)q$Jq*B;U(N(c%lzqcmk%>98rFPwO|WH@JR#(qSHy%Ca~mM zXPGO?%fB2owpH^#dJ@hhpnCnz@Yrs$`1E%OUwlYR`<;yqWbx{$`-JziECow@HOsoE z*;Zn8>9oPpdIl`Ft`LpRU_ZHIg?OF9JuAdS3iq!N+s?2$fklzDhI(RicCWNf7m1Gc z5}BuAPJVfoz2s{gw41MIqI`{0c@`zFULnHIu_b{eTt3I1B?%us53>T{=JTK%zgnC- z&t9c8dNNk@se}&+iJg@|!Y_KIlC`IFlM6;SFTa46pC%4nFx-hUPt%J=o@p0>D-;_p z8o0X`jVwbhfz{sSV*VvICQzPtstkF+qq(?P#d=UV+GX&#B;LLpz<=Fk_95XKT>+y% zws@Ys!qzfge$7+k8k@cKIFF;u&&r z(O8{E<5ZC@T^frD2AfKwb}!9`3cZcy@Xc}OUMwCoYA+e43r6f8v@tj6F=B@u6+Q2= zy5Yar9APvY!4FAhq4Tg!Tz*n*D|X*yO_)dAxy$02YhoO)h2L#?njL4qypd^>&0T9^ zKe*{i4o;lNr`X!Nlr-MQ-9fZwd;m)lFcaYs7L|+_uw*fv^CSetoJS!z$oVHh9bHNz z(|G1i5zi|8mq;&CHh%- zdUFSDfn1LEY&JuiVPd#_68>TNg58$~(0WS6TdlzpOVlW_-@>~xhlsWE^spkx;l2E* zXayCbmzBr!BeTUf?%qSD5bJKHpSzfd!M9DuGMP6w=w+xY_^~vhEF!w z0U;Swh7UP};2RJwyWH6RqlTG*8sqJ@Tn1AYjgZUGC&>(NlOq?(lccz>r^hrcJhi_vadHbkIB{3NUE{n4_ z4a=5p8wE|>*zqe`N?L}?>KuqZPvZ^I>Y=5ZD;Qvlkkk`-dL#JU}7GO29IuQP3NWe)V7pr8*eOu!1*PQhU9Us7(;U4MUQa) zdQf%V%b~or#Y=#Ar05*RlN-eGPBd}O=9;aW)XSI*SDev733!kNp<~=uGM?ba%?aZ* z;mOSp<1I41G?5046EsjIsZ>_zSBzhitQaUUq)pP}Y2ctu-_Zue3N4KxZIa$eBZZQr z3z3{PopGN!54u84`72ery5nD{ZsGs0p11niqPmy+Y7B3A(bJ?I)YG9*SlL-V?I>{a zhf&2b;k-^zwUW)O!4qB8dbM*Fd>x^_`z!RSQ?@#&YKbf;LG)1~@LvaHzG6U-%72?B z+z~v|2|3a@6d}eqT8a~uHF#`C^ft-~&}~xWfZitEmePj@O7GPM(K(!d?9hBY1_X49DaK?-x3yOVe-l6L`id?arjY{$$^clLf% zv1}fQME@?X3fqYf8~qIgGEOPr75Z$D_q1t-A4=YW?F@hG#pQ>h=^UEQhl zJ}#(#Qdf68hzZ)f1aWc?M&IJT)D-OnC-ES=t;`svR5+(6yh9qH6;5~WO zQt@5_ZvmrVdIERDMp>W0UxBwEE|I^&hadHPn#g-FzUHdfR*QEH*@|PjbY7AOJ)2)C z;%oCH@*qE0n}1H>s@nY12km%u2jc}-IboYaJ0n~nF1xd-D?-AXx-3`{!@Q5vu$QTo z0RyfUxD6ri+19J#Ll5l_+W? 
z@f8MbZLua9)9j@rZnf3YO!)8ntKv=)k2BCIVup)bLh@?4G3A)tLC7C3x+n8EFR^}A z{5P4`3*Z%Hn*tLRmvh6#v=rXhhsjRCK7Gj^3?J2{-j6YA0|gGubB^*47O{1BGas7| z>O$C$>p;BI;@dhfK4pb#3&0)XVjW(WFFz%s>+;qCh!&uoRhPF506EmsHts}WPvyxG z%TK}azZ?r?6IP@pg<#T76rECegFy1O%Qbf$QnUii-4yK*0(HgaNFJ4=*Ge~_+nND( zlV|u1#1aBF7kg6CJAoW)iD%Pz`^VR!Sfb@^aBr|vKW#J8UL!X|b7r`j(OWFc2O(r2 zHa8LuIE!wo8i&%yg0=>38ar_1RfILQ)BI!ieczW2pb5#L?L;(6MqSbA{1HI#2+NgdAUpMQ)iH2Q z(|HHrFnuXnW_~28oPi(_TJsW4TzLjoA<4B2YlcbdfG4nk=P+0tS_#@fD>5!`ILMDc zPjP^zEaj9PcPOrra9L-(jdzlul_1Op#8;qJ#WRWbG-fbo?M&B^i?~dq+_P1`5EBaV z=#8WsO^69?F?-LY%CYr?NU)4)TQow^9{r_&2SU99q2!u%NkO5y_!Nn67eT606E&7^%?c z<$aSZ*bO+y@b}S_nw*ua`Z~5RFNh|{Ee3VH^FjcHLtA)H?yL9UZYmkLiInw{+oE)G zS)|RA_iG*KejOMCr5o|E71yuieH=wQb1XnuR~>Pp0Z+q38r%>Y$G|aCTYTA&+c5%% zHsnnatZc|1j*%x>Pztm!1|uBnMzpDjY{Wmp>^!s)uM-AAmy+J(C07cBRC-Q&423CByxg+{c;m zWe@D_C$0^rctNRHG zvuyY@0m6YceB*#OF?jX>&hA=|wiY>qdCq zibviED{;2|JVo+t?iVQh!RCIE!udA$OB61!;i)*n#WwdV6fU&ki!lh7+uW~Fc*E-M zN#UPXcP|QWS>3%Uylr*&q4003`*jNMSlw@+m9u|FD|6gMlFwkt0U+{Q^0#ZeNi+pE z_gfS$wYmF>+O2qe^xL4#u~ZXh9fZ=qBf7WZk?dX3w-xXD(0i2Qiq#FajZEjP?tb8T z<^agvN8aLqycGoe0KlREU^xLl6c<}zbo@uuNawMc9}%MiHg|t&&aD;UJs)Qwld&-Jemo-sb+C!YwxU7Zm6Qj9MTP!I01k=iyf?Pz6llXE+t1pC_cn3m=p*P~&#xWeh7Dq4}^&1l|Mv>_C4 zE{>)16)nbG0;5xs-aiok>uLJrBqyQS>btok-C)Wpn~XGhdbQ@f3eW zM#oX~Wf{$(=u0v>mZC4p=(iMoK}N?=w4aQQrYQDRC~g!*AC=LO6n$PsM^N-R8O^3> zq>NgIQ@pjP*B0AKJp4xXfdomC(JVx1Rb5LBmW|t-%PI1##OXx#OncJ-3crw@P)5QmQ$j22o#ssBmDVv2quql;d{rVKB(NQ8w1ZX=@$DEhIu-j=(9 z+e*khg2V}TJKjFHhlJ!2q`P>x9Z#z9ReRH6NYC6B%k>O`tr6vSw+cr{K=b?d{_LZJ%6#=`+jK7VL(6dC$lN} zp)cur09r?SX|%@)D%>UII8MLju&+hk4!n7Ae~CCvh>yfu9r*h}a#e9IgVpMC z!*_6@15XU~ZdORa&DgKRbmV;_AHi7$$A>L$V+C$(V7ht6b>t1X+NG=OA+g~>-Z{Qg zI$uac1w6y`ph47`wro$iG*PP)KZxDQ^-i$TlLhAgYvBbxLm~}%n&PU;8Bd+g{5d9u zXC#EhM$$O7yVQ0j20snH8xS1K&9ElyVpbO(ZL_-!0&xuOXfx&@2O3+!p(*-f%O8JJsA zfeG@6RS)q7o_$?;EQ6U)`7nPQ!MNAeSn+;0o>s$-wGyAA3Uf~8?oix4w2L|2xTpo5 zrcboYkKGO|xGG5;O|an8U|9{S>fr3zw{aZnho~6 z|2gky#twG=H@pwqD;xu`KV30cv>3qa;7hXNl>z(!)xTLLpU=bOcWlXQwUHBTzBo6K 
zr$?Q;jyJDre3BX6i?680;{r9xYc&Xr5cV9zb11wth|@8er{Q28&-n5!o=1o9P7DQ) z9Ljfx<&O03QcTeGoROY)hVftMv5`16f+vZ-*?a&ce$VE2y}&#Qvq$m{;Uz;io+02G|{uW%2K8Q(^vEh#qC6jpqFFGeqO~!H0P|;}$f0GxV6?3NWF=4)a76ija zV7kzz@}Jm1VVlPPi=1CggM%kTBu(c}v$mf9PUq(t%IP%|n|hwTRE)~w^Vl<@#Vr1Y zqahj3Rx+PsJbAM)ebDV<+xL8HFh%w*;TO$($6U{b6}-NU4HuQ`c%LY`-JQ;J$O)jN z^CAlKmx|YaaQ5n z+=P*duOrMC2^)9{-#A5d-oPK`=ckB~8+dbGHAVcmf!C_mmmq`q{082RUzs9OH-ad4 zs(6*c=~Knnjo3S5-}BxknD>0#W^r&QU(ZkE3bBi~}_O7H9 zR|tl2Qr*rcO`T27iK63fUX!jSys(=;N8!TVycOxliQO>&&*zGeJ-mL+ySPe19+7sY zapN(<=?>+m+TEMtsXhFe$cywrF#lIk6ir@=SiXnXi@tI%#)ac(YO$+(coY6dt|z6G z|Hi}e#|5Zx-Z&AtpSP}V0L2(Euk7b>B-N+;d4GhSv-^1;Ikq1!=V1Zk3%yN^``#~H zD!St^ubYf)PFFBG3|}Qq=M}4Iio)*<+^G8!P7y1K^xR=SmiWJZ1Pk_`hf#H3qV*Af z0>{kr#quNk9H~#=6S=(e!dc1dB>21$%DX5&4^%Xn za#-+M?s-p-NIY%t#$~-zE@2M!-6TY%Ag2=@b==TV(F586jA|L}Kt_fMZ&KEka z1v2lv=bWOlz7&n^kt*PDvOf%a@i!Q4`ZrH zvG{_zh-Emxtp>ssYs0B~223p#F>KYf^80dK|%Ni)F{y~NU0x_;}-|V#=iCnM|*u3qzrX|v$rv&>0~4S-A$a*YB!-b zF6USgVON@gaaX(2j*Sypb|ngoC)pJ@Qbl&9HgM&3>5IaMLU#Q zHLHtI?kWFqhmyfZj1nG)(i%K3Ih3f{)vZ8oa-*X8)HV6&n(G@q$-zo6zgKVMi&9Xq@ggTwiRHKQg*8-Z$!9GS9Yd9v z|DTNc%S8WBrIs&a`7*H}RB`jm%f#+brFB5vAg@;!scjR&lw>qer!b`hKQ~DX2vg!v z#mQkxHa^iUlAUPGQKFagp6-D)7RVycsq{zlcp_>j6RgUtyIAI9l+@r7y1H+HnX8qR ziK8*fLTeU%Q1{FtkrS&tov`c&Xl4;UEtO<^Jq8CiIFi7pr)h?{6|2cEFw_ zQB`p{L5UFS6P2gfP7zuQ%DYcALty!NGR(x<#3Pa25TDgj+F6E80Vq~nNKk5tyoQP@ z4%LDRY~CS4YbzgHPSCgALX1KeCgaaRTqZ!c0q9*1QVjV?Y^#lRb=psOAPR9;{3JTK zP~Y7@iOdwmZTaOVWQZ^_$ogLVNvtA*VOvENg)_H`R5!w+t&|}~WKrEVZxsXGP>h{h zML`{eySIw1Ze=3hyH)f`Li!l+2rXB)f_pS^j}q&W6dNB|BDN+$oVg|9ZW6@VR3h3X zgL_4Z=%0)XRVBz!v${e&dzS1UF@%;Ji@PT7m0O|Y0wVQ zsV)ScxI=ti7lQw=!=PWf!=NwRAqF&5l5kQ<#}8wRA&4T@HB{n+ma25&JCBKBsfxSS zcziYz`iuiX2d*HD`zgz--N8uxy-93IRl4)TM?}puCAHmp9FO9f5{`<@ZhZKdE^?}L z*TF)mtuh5Zl)6S2N>v&qL^<=I_&iPd!7_Udgs4^Bmex%K*kLjtQai@374W#VD=O-tX@;&7HR~gswN5-m;f{4;D20{l#99hR|0u3fxbjC)zX8HiKZ@5W9KK%U zHUOWZ_0$?E;zk1{%6gnW3O(+Ih-j!Z=4;lAE)79(RZ_I!=a-A3hDsB@p;(-4sHE~~ z#Ui|sG7?t=r#Di1;KJagMoMF(YBg5I^S`phvc^jHOwVMxv#Y6eyLusAbmp2F9}?0I 
zmD1%*jnl>IZ}GJp#4YqXcwZ4+#{>1_NrUULcaF%onJ%lGUm)W_ zbX8^TW^Wu9Rrb%5@u!36p30>OUIeZd+{yCBaYbbeT?6M@5Ov(-tr9M;s9cwKLNyz8 z{u%kc3vqPWjKiX|30eR%7Hg^`cbNPqo*2}ldBRxsnY?Glv@?@nR59mSTQvfYC*?lv45c7H~4G2-zTX`>R zJMQgCOBIe5tt4AK(MKuxCvcwQb%n!P!V_I(4hvgYeFx2?&8`p|-cmlN258?`8A@UK zdstAWh*N!)F_iq@+j!lIPtZ!7eb3(@Eu#T|W#z91fO-vJ+hrP&|2I5FTI#TK1K z-(3v2?|{|PPjKg5OiDx6i{f{br-@^YcQMuxwtQFll){DYDoOX2{rL5q>|wfB@SDU?+iCT_o{^yNFVJ#S_x!#G7=-RPexX(t#9{NOIFkt0LLSDSFbUR@J@1cF zMg+0?oS8naN=eJ2$Jy=1EN-+XnAEFx=ur z*#@PtngDLGC)Lm-OAX2pBerc+9%3n;&`ruz7M_ppl$#BF zxsU#;PeG1Yvsp>_7r5u_W+jg|{`CTk7M1=C&!f1Eo{Wp7{gQZ!NlSMt9JdVMFJxNV ze)u~_CQKNug=Z@YTO@98RpzoYp6^SP<_b>R4(?DIu|*zrr?SEvDm5w_P95jcD986= z;Nwbt8tPZ~Do2CaVh{cV&rtTvPVW^=UT#6(R#-B$vg(0p(aL@3D!ToRgDb5f5Pi~z zzD?*M*t=9#i2MqrL$r6Zl@!sM&n8#KLb>S?e^n^1x6j}VQ?|`o>6rnh$_FulbSD5e z0|Au)Wc}vfBU!YQMByu%vQ+sJM~LG!ptFAyS-&Zr!j>bSoFIv8;cuQjzbPp!vjXe^ zB{PH^F|=jzM)7XmSGN4{D!jQq;V6wGX7X@SP>jJrVGJ=a#6@kDYz!V3!tMb3DypO* z0cTD&W?mA;Dof4UB)&MIv{%a@q8NKzsV|P7z$7>1ghxH8%;k~UC%siJrO#fM2jR%` zr1u&g5{NDU;7lN(!bhJ^ z^vi)ZX`?+2&MVVcWTg*RMU(YSjO%N^i^G-5!w+ZS+}y7gKpBM8Rioh?fa>yGauVd> zmWGYFWJ_Ws+vG?deM-D@0ShzifIluM-&jt|&v%F(I*SmaFDkPL(d3fyJcS=r zDGB1cOUg%HU|rF!3R_tNGn&F%Rmu|xJ&#^iK9aV@nk<gs68_*+OB}Rb?cFPyL}J zcyg~{1bd;4Mdfv+4-y`ro|4cRHTL0L6h6Xi2Tq05qnX zIymkHq&zSW&5jj;d6olkX0xZkEhWi}+N^k5O*Nx3p3luHPb8P_*GcLF?!$Rp&#S2{y7byaxxQh+z~BAzC3J0BtoZZ= zbu^E@KR2f@kdo~9qFe_F%V;%v!FY0;LS6>UCM8)l~97!-fL4}Zpj zE`B{nLAt3sf=s$cODW#a06uS|T*v_TZ30p_zI@eXrm3!}3zuq*H3fGD@DjpA%S)RK z!cnYKKVJ;qIvLeW^5UF0@u^xD8#()D_<&V`sQ;Oo$Wp{3pQ&%LG_mM2^%M>{CVZ~; zF^^#|fyAxP)q3sHTo&@sVfCgjy+c5iV)eM%8n^B&o$umbSZfGx(MI6QqCJ3wLS{o| zm+1S2`mPN_fwoK@ao`Je1zRFUeW^a>eH7S*Lf2Pn42_p|U#Xw4$zsV@YESGsLcUfT z1Wm;oeG~HHmDAU1T@n1X+7cAgDE^KSzwx#DK)p{5bFCBf8Ri;3UIA0h)X4N~S6mZ= zEVd*7ABG}y6urMuo3o$9_;1t{Uf>;D=fA-?o);w+&vnLlx(rZ@7)I^wOm)0eh@64y z7iJRAFDr^?q0Ag~m2jMH#bXI{di(LNM@c5VO;;oUYEA}MB zbK+b5im|Enp|5Nd(?_CdtzzFu%rjO|bClYL=S&n|jKX>9&0J25SafY zdnpy-5Py$SYd6(w?MxMv#&U&AjW}n3ImfkPoZWM+9M#5e5(Ad7%bbRzhIdA*i7zai 
zm`!`eS$=siL*Wz^gp0^ctps3*H_juG8En< zV%%;;OMr9E#1T&1qhtM(lz0u>A(s-5oxZnGaOsGulS4UCTsA^)b#N0FaicnT;HfR2 z>aC9R2Y(ePH_A~ov}y4eH7Y)j_~4#uE*@#2L^_(oIS6v2d8@{#t)I{L5-g;GfWYs3 zCByl>pCx>>QlcUvghT~IXf$xHBsU}~2$V>S`&Ny$P%)<_iGpv{$k!--U$c_5|Kwza zdvuP!aH62BS2n_H#RGao-jC0tBSAc`a*ag^HXNKJS!uR}irYwiLtQpw9PNQ&80XOdNxl`n+37;;*aU-25b?FW~k5e>$(V=sk#%dO@Id4p>N(V3^RPC6j>wvcg?X<2f0%po_^67k4Y=>^yLCFt1+uT*%@#HRK|nyr zjSB9UfD%l+D6*{_-Ls>u3L zPtbl{1nRYc`O~=%nUPWtw*45;Rh(R}Mbx=<;;MRWg3>0M>b0=4UHlNmqo?{+NtWNo5Xn!Xb-A$HVFTNVDEJs#Fz)Q3iZAX;)VycO=-(j zMdPZuqDq{yL@SG8FNKl|+Ez&Kr&=S`Z2p4l3nU%7%y1_-BSB@%uCnXl3xHdOV35U9 z4s(RME3R>lN)tl=Gp3oHd`Meqj}f(o$~Na5W`B zKsk`82B%%CWkvlrDmN(CgXZdcVG9u7u~7-e6UiRwuLC-yhqzP#4C#%Z8OnYHE)3Ob zC|e)oYgL?X3?QU)q?afWzZlY5kOS%0*`O5!aKLqt?^!);iVLS`IpT)a0!#T!40%)= zrdZQSu+zs=rJP$JHLOgmrw(i9o56bw~6O;Pbm!3gy$Q(XH>L52FYDV~0% zV50JkSpGPc5Zi><{WHgiT8X&t)q>@G5HCtApcjnYI-79O;54*32>f{&ElOWv^-_6 z`2Gp4lN=z5o+;X@kXQe=OE0iVUq?Yh`HleXhq7iVtHFZ1WSjS70{tg z(Xc}6my4+=(N}ANZzSOu`bI&2(d|hbZvjphy@AoV>Pc|iEu!g3ZLrcMI-b>v2`8I}kOXBEnzUj4Ec++UO}(Gr z26N!D0^@|6yW!#Q$8Hne>QN*!V=G}s`)bC8u`ffo!JTl30g7&B2 z5dcFAm4D1`Azw%RJeu1>*Jky;#sPC)&VN z8y_a^#Tg1eyIW~BS?p?7mE)3VkFrd)Le}Dv8^CQw4uU(E&w!ZljCNispq*L21hn}X zZ4l`Di)XYV3!_XSkSV%7tDQ4+j|5U44s)ljVc&$__Vb?wYQkd@ylr7zfh4BT^xL0{ zh0ns^akKdHS?XB40uqtbM*F6B;}dHRJVSZn%8v?i^JiGk*q`f^q+FEaPaPIRhJA~; zaJ`n>?N)9*;$s);&Ix7PHKJy{Hcq)sY+0}MD_pz*aw3p|&UBBcz7T26+CU{+jBM6= zt1oO6=QX?H1rt73+|jH}&biqdqqGs8)^4Plwr*86q|0F)1%0!pTcgwtCbi3?4kUGo z!!~H;*)yyo{asAylqp?E`ASr60Kd6K>_E}7uOXs2{BrfkIWE~Mwy)|b3$n$o4O%gz z)V>W`m{O{EBlfn?FWI+PHk?4g54u-5^EZmg8{q)`^)4{z1^*WR+^7vGtfo{ zztr_YDnNz8O8PO5uU*|!UOla>?oW@VML;chWxlXln{?y&zCRpqUd zb=9D*nU+WCKwXew8zd;ds5)Ex@L56O88a+-)#-l*HUAk|e=X{tbx{4-5<7JLH|{AY z>vIbSqr7UE;v;qU^M^?5>`twVnOcb%G5dL~L~rX^g~46%yjJj+?8>Yup`6OGw}9Jf zHPcZDdizw+L=Nmg{+lDY_=*a1D}BEUL~geseDW>3Ns(B(+za^lbt*l>0doZHa5 zRB-AEj_BNskxr;HXy4wV^(Dm}xkbAku0U(HXh#PmKV27$Sg@`a3>PC>q)a=$MLQX0 z8quPaq~Af*)@E0Q83?AiMH@EoSK=YlqwT$^6AB*RU}>W41(?GiFphiyD^v)K^_>Oz 
ze$&2FEPFvKK6kd2G6gf>VQ;~-l>z4Nan4Q(R%{@rNX(QuL6X1`{Vpjb9d1k_cNeys zr3x_VtAdD__@dVP1Tv-C94T+`*RVf=ys6ygt9*&aE~68QFyk3Ki4rI$0xn!f4p}F& zCZr1m1hls`FKVOV%n_X^HSM7I;YF>V;ffT|Z!386Tyf@B?HnpdyT69MvU95z>H8~5 zl7`mUGNgzUA@#ZVV2YA$+JO8Z&Pp}qN{q&#ZpuZM#0A@+TtY>x-j-Am>j9+f?2c~= z$_`Jch!G!X*yr&zOX>qSdKbi@=%fH{MWqlKdkY34nkYWE=7s~V(mm!SsV|=WlJ>hX zSYZ3F;*G!&FhNHNThMKAFb3>~L4AM+tll88@H>KI$&Hj9jd{fJdQ;S|fNJ*BOIkkT z|GKL>o|4DvP2tf;b*RtBY3k&T_I08l&o#HQ0$7r_@g2%yGv8~loI@A~{A$jtvJut^ zqI*_;#GjE)xNGvE@j&j-m>Y)4%~gzfu`=qb&4&TR%86qW6=7iUSK{pRZwvCvVJLwc zLVDaA0Yni;v!lBFxR<*14PjM3v*JcDIPZ3341#ngUaoP6RM88UQnH-(5)nHQwT7F zGRnnd2g`Qki%W1kx@RX}I%Vr$`YW4#QxF9yp0Tr#X@%mkR;?tupr{H22r?+Dal-myg>1M_0e}1i6^AL(4WB__?7-EZd>wM!{!2 z>u!=t8=0XI<@%+J3#@pFnb$2j^I)ezr*U&Gp%?-& zMHLT9mDQud7H&SZAcw?MDyI}Dy{e^M(d+@*6tA;M0@{oc{**e&HA#~g$uFTWT#pm> zzOa;F;3pQWzVK&C1L0sc@szt{D?Y@qB8D5}8c42%5X=jPj(Kku21}Cj342aJ5!PFY zfi}duZ)l~bAk>W%c{L$C-8E7*emb=<^-~h$Ruh7(%U3_8x05+M~1ux1C^il(DDOiQs#0$-OaU@xX@piJuH>q zq$Tl_q7I~?O^?wKj*gI+_HV5a5R3c%t>umF$uu~KFVL8dMw;hTV54YIZ_{7qey zl4|3G6G5)(2%+0&a!HH9@$gr}q?(8*dR;3tYxzfvYa)taNkgy9>D>x#F1lrM4mMmXm|0>lQB0KCf05ae(Qf1FMXdyu31}muP ziR39Y(8RxI`IIG()E_Zcjgf=Ksu2}RR3xW-il*5_fSb^<5csF~%g+UcS^N(ABA z91%NKYWW#0{D>1l_}dZ@Ykw{nPyqJW&R*f!7J>Xk;0AK}_K2AFRzY?~8!Al*#(AN_ zya64oqf_>;gVlS~u{V-HAqJ9px+08@u^4b_OP#@Mm$_-LWL~W?EO@cbtHNhYw zih19DG9C%SWGA|*g?Z7&GPYj^i%TlPuzeTK2A9_S=o&i`%03H@t_K_Gnpm_6MF*y! 
zT3Xba%LsuBJuV-R(6^KUdd@x6*IYKz-&#hR!&V@2wjzI;sC`Sz&UcM@=`Q~#f;xzx zj#9DiEv-1Zw3J#NxA$3(x&&BgXL`HrEINiA|DVYkk%3qR?qHitA!CG&^Uxg^7{_KX z7O7f+p2A=;8AFxh8aKTe-pTqqOJy&k32_Vm1Q4A>^ugV%W;fAccC!oM)s%6Nn7am@ zG;BE_?!0wB0O)W+ho%Us1zOqhlzIQv4=^)g-lBce-NEO2%v(~|mQ~sFtYm^Lj5!az zlmaz})=@6gBm-bYN(wWiAxi2#zyb2O_?45(LvLLf_D{%+~Q9#U^ek&6)e-l(^+P+MH}NzDQ@d3oBIr@dG_yjv@c-d5u3 z-d!j|-J2ZYT37GB9tMGg-px>%QtuAYMN2Nli15E^$<;g0$u|3Tya z2rYOTH`dKvRNbI)*Dz2tH0}?I!NmM>+MoxZQ+GL!4+rVowaD(&mK;3gF{gNOjLQo{ zhtQtuNT#3ZV~iq*8Um_;MmSQfR@O z?$JgTYecbBM&gw~N(~^zUks(so*!4lq%p%~h)pcsgyKo7wYN5vS`$jkCzfT|-rB9j z>!K8yg;B`NLXWWB#ak;XOI^GrVQphuZAeaCCjM2Vs9G4J8dfY{0w2wZI7^k>{1|1yWa!aXB=>#7Q90|k5 zv4NxFRrVU(-9i5a0uWNgqz|>Dl)BiRA7blf$b~L2?Y^lIKl=gGX)iYn6^X7F*9X0z zC&Rl!!5?S1FgoB{oG<>;q1~QSG1cAGu=lHBC@?DAM4WB0^iQ{{d#XG9UlAPGi|p@2iP~i(3WLjqwKZ=a0q-!JbM7!Ka<6$ z2ee~VIJ*q}Q5(`{2tAKC(aF++mk0O?X1_JacRC)K@buUZ2FeC+{ZTubNAtJgsSFhM zPuj7i2o*qf4SquxM*f5=sng~DL=?=Qw7zMGpC%&+z;S%ZPg=h+MDXJ}npj3iMf%e5 zXJ~}FN5QjBeDIStK^*?GHmX<3k$%Oe-rDN z&a)EWf9fW_)i#PqkV_rG7sc+eGcJdOCDKl zZ^Pyo_Vv|>2Hj6}=K52^49wfAmn!-5KGplHO^u?jqKDP`Msb3o!xgnrT&d`n084F( zUK}tSy0ZNf8?Zkr`jc4)?tqaHwu=@sqB{@2C!SUHYctO!nc-ud1_!0EIL43aYVH&} zG<{U;e!o5hQ9$KVHA%I`1aC z40=N>L7%3<_m)MuqYd$X5+dM zY)KFDorP5b?2I04J;dywo~xWG9uDe#0&^vtPQ&}+-$A`nox52Sr0cILbHwNA`gHI0 zbg?A^Nd5kiiHKPrU~O(?ld|-_sOQ=&y|-GwS=^tcSLTwWba;1-_D#aC1aW@FtL94Kj%#Xq|1<;fT!I(97J!nmxvey(&-?xD}* zv#y7J878`#tvmUXo#LyVYW$~8b&CIvfX9F?ty~So&dt`(Rg^jr%hBuLQaveGKMzjV z%X9T}#HqRZed6Ify$Ht^@qOI4a6*s?&%-9}nWnkz&0<^y;nmQI?1kd+W38 zhs;7w55&e#r*q~*;^W@>H3SabxR1}D`sh!4&tt{Uef59v1wAMdbISE2^I%i|a+|f#u@Ie)~ zV1J*SJM^#@%SP$NnYWw4pm)dMK)X@2jndyksEM^l>WAmf!cGT09~66`ieC)7tJrr4 z|7g8gxl?T7W44$vMn6qwgw|T|_89$jrB$pRtM~8LtWE_45yjm+RGgPZ=UDxs!k2wC zugJSbI@pqrozEHIGD##I8Wu=ywd6b^691wL!=@%;>il2|tUr^SG7slzhSw%cG zUjH3V(WO`Dhj(As9Rp|c2BI<_r8WYHOsdfTin9^xEA(pRZc*WYm(CGa;bG22;J3kW z|GVn2?EF}>qaR@CrP#AaWBM!AV%)L%CLAU4Pta#7%6u_M_VfwtIy6ZG+lx@3Rs@)LCgZQYqX`~>toFFrg;KO^-cHc=7#>STSDA|ARgJ16)K 
zHBA^5;7=PXIaMF3aA+q@(!cie$1$ht|H2ug?_wLK=qD>#lgVSw3T4ElSjk!XQ|i(O zMB7>VpOqiQxU=;k*4D3a^d68pWb;Mh^qAM`6QZ_3XDfJ?-k=Ybas@z8nt(L<#hwkyfop$2GZ z&0D?5QxjOdw9x=BcMqr_vigB=v_I*Gs~y$i)<5ZE{U7h2AvXUB^5J0-xd^i45i#o`Oo;}u{~`#%`{#-Af7TCE zADkzy_%o#SBlAT4pY=&d{_4+2uA3*0xEP-o%Fj9CUl(JtJuJ3gtRJO36-&QFKVQX& zUVfSWo?5p;OsdlRE6<64;i0bbj^M1X(kCcNub6SUe!O2@wna?7LLZ|xZWec5pQY!IJZ2|51Iqp|Q+x~`}xcC}<0B^^fbB#Wt`%`d4>gV%~^g)h0 zIJCKbqj>8Y{bGF@q-d>}@DKeFo{ahOA7EKL0OE+**Xe~pjx*2X;f#g2{93(A#rco6 zYxQ2r)8fZ#_0zo~CF08K^qjQ1E?oSXDBROn$M7WXT;f{`3x zZQ_G?JhM(OG1~8fQ{rCn&pQ3w&;bsEM5oVQg43FA&oJ#AFbe0M<>K2qkncm{q6Odw z4~x4N=!M>BHZFk51Uw&g51v2I6))bSk2GrJ?ugj75JGje6lc>wEQ63)XL*&X9Vz8-V*vZCvxT!4+!$T5--oeWZHt zS}|uKw4D3bik5{aws@`hWFhGCkw=AnpI)p!utgkppI#E8Z?m(DU6h-tzv$g#bz&-V zD}DpdP++ zw|IAvp3@i19(q29f!RP* zC3;Uqc|7+0Lpn~%J|4?>glg^MBELcZ9IwWsRK6dN&3H`T!{^_Y>L2lW_A>pCin2z$ z)2Qc&9m~NzW1>eRmTXPpUyb@`WvN)xsF!25Bg0rI7sV^&$1_*x$3XycVUs~QPp07di(#*sSE4wkPWyIpi!rH3LZ z8QW7b;%pp-3mGvGr>%m@{IvMjD!n*`=@E|PEGg9kF9IbntfE{jL|1eg>}5`zbAA${L82&%`5Z z^uv^&W3R2zW9pf;w_*~syH`L%Jo}u++mat3k9|&ON)B*jpVL~C%mEpD@CS64b%+n1 z(h)&yZ>;QTXo>!&u6g3KwR(YyXbHwTeOOi_*qF2qT+S{visRPlcc^=}#9m(qp_PT~ zaTU~b8oPwOUO&Jy@0rc|8GSs11noDJCcGXZC32#l`hv6fCUIM{-ZyTNh`>D^3vDM6 z%kU(;?72y7Z`Lm@>OwGDuz}_A=tEqROREr^k|%G_du4y|8!0(r(FQ%Dd@eR@aC^3E zgFZ#A+aiW<)K4khk7}h=qYF#6c;pDUq)P>8i>TiS#r8|_=jU`phFU4^d`{0-=dKhD z&*^7orI=h;Qe4HaY4VHuQ*dSvZiTM$xj25Sehwag*ar3SwXN8A{Z<&;^eM{cF?_mS zLDHKq=@apYY=>I;xj1dRempH`i?-{TX*}Y-tWCekuEBY6XOuMNtl|xQlQy)A zXVPhMmxgob4V|eUcxMrVn1~1ooZ^3S)D;<_kE)`!tC_eUj0}+I``_Muwv}@tv+2j zEw=1i{UJ50b$UvREz`vxyY%V|_dXoEpEUgoc(4RgDvA36-UZHPcvlD;`@Wfs<_UwGInmd@sbR_+dtE|6ei6%nT8XTAq_ea zk;`5YdneO)-48m5bvMTQasP|x>TZOJrAgC%n0B?eibMi~9TghO@E%5aIB5Vi5(ZEl zrlSEA9yzpq-q6G73meQsctFpIZSP_9W>|*3*~Z_^QAwjLtR8L*2k=<>Ji)d#nGS_G zg#|OPh6qcq+z-_-gE5JmS7M8cjbf#nwCTW@^UqT<2Jj5u*pGxcR)6^)Hp|(9NSz9I8_`E|FX;&loLN5?uBvW+5#MT zXvo;BoGbc-jX$8^^sqsLaAVlW#U)Z(!p7FX`fsP%PH*l*D?^QI3Z9+~kVEkc#ddcX8^M@Nl#E3qIj0Sf}AEPV<=GlFWTS&Kg 
zeU0HMpHJ^=)E@WV8pfGZD`>sPewXG$8%t;M8`}mpuXcLSU5m?R7;;z36z}c8ZG7X{ zLv{=-H-5)Ps>_W&`E&N+q(eG1FHd8~auw6uO)U`%`x$}!2bcwsh?7~k=@8G-+207L z5APHE`=eW@_cMCr&t-lpbN_Fvym5fh7r`HmTM6yF{(yEFtIkWU+5=_E0HcrEBB3~1 zUA|9L3^1};D*hc+E#v!_0}ZcNPp|ons*t6Rv6HE$__@C^P;44#^ibEXNz};<>drMH zZ;&xFt0ie*!NhuAtQcgBPi0dI+0@U23~8FU9CMU$Je!$)ltFWB%pPaV^sA4WLaEV<{F?okSnC)|(rdm?Suu{= zlnG!fjoREj@|+Y2AKPh=ewbjm?@1hYwDBop%@04un5t$otUM-xE0Umz8jzr|l@qYr zsm|>Z8zvfKdM>u)_2se#EPLTM>wa+3I9V8^WSJ;C&X~ujB)b=89gpWD%JGJpwI><$ z#pitRt*}otK4N#ePBaGa+503zT9+G7GRE@#i<6Ay>Qi3GoL5dZ_9*I)^`iMyBR_W* zRMc{xFI?u-c)vo<^jl+JoN6?cWyBKEb298$Y~>Y3V}OdppRY5{Q|mqz&tGSJptgJ( zd;FiqDN0sdOCscv$@*7e4KtfdInldy;`c||<^S*R>_;q}Iart(tkxtonH4F~<# z)fnTI?Xd$jMxBb+c{7bEc-=G8$W>t>@ZV|t9`?@*?lfj98^oSFjWIa$T0R?_n@@_f zW~3Qa%8I*b?hqU4|R zjk5}$jIR#3Z2|pajAG;FUpX{;=0nJ@67LEK`Hf0z=Bl`Cflg+Khy8odW7O|gET7(ZmC)pt=GIxw_~t7hcACnhb*(L|3QjGLox!w`WK znrN>GIxpgkxA*qfptH_>gPT_RUl1Dh)?1jyx65&F-Awt$a#MrOx3?$Ks)EkT$()x3 z9pUC=xl4o2>Mb?#7A^@ocSUiK-fQsUpc6}0`e)pwmjvx1Ts)Vo=1;gX874lj+#iF^ z(@C%|M5QgzLEJ7~5Onr!NxYqpK)K**UfOv<=f19}d?jpuKmgmliMQVeosZ>P`nkA# z4<<~n&^bZpi)3HVM(pL^(+>``)O3-=S?GDq<2s*Q6T6*;KptCuN z=FwO`Z(M=?N&W8=)91udIGTER()*H*_|BiV}j08Zq>xc(ZJ#R z33OKmodd}(IOq_&7j?&}BIrDv9PRNzrzQbm`Z!#h=r+Mhj|w{XCYua%9f*F7(8#E8D6XB1xsjXkHUw{PLUDG}1_zx7zh}<$!$48b%Q99t>Q5g8 zB6?iD!ow?lV94lgcxEln3Frgsm ze2DPIcw?b_%#g*h3gRaZ1nQ=->D-{R_Zyj$=+6l{ue;r0xoq$SS-Y&WN6?w&c86)* zgU;Fqh})=aqFc~uTE$n0H)m;cqG>D`-}@l}Jz{PL!?d{gGcaZMo2xp$My8E|Xa`I1aK0|B_jBAH+^&^dx$$|%~r+MA8Nu#>=G z_sq$}+XhK$T!$@sS>g@rAfUl{=|!0f>m%0i&Wo!rFiZW)%OdJG=PNJAay9d(^k6OY z8!rl?DC=f^lr;;Nba=WAuHpMEmfhXF#;?|X7Mqo0eyY>-a&3`$WcT94twXemY=Sw7 z+xSo(;VrR1yfnx>g&JtdVR*uHaQP5(fm(NqFo&AQVB`6uq2|~DtrjF4g#UEK-r@bM zN&c+mPi~vQsq2;4%Aw}Tin3EE!^}=th`t_Xj>coqaPty8<_$MTDc{Dn4>zyDj+Quf zgn2tHXb_VHw4m)7Vcs1&Km*YpZ0$2fuqRt(#PGz<3H-Cf@=@k^vFKrQQ4GIs((I>l zcu!Q+ab+_8RN||^_JJfHRH`wN`M3!*vhLS z8ZU=I>>>0;dNU9|?~XVBLKSvWg;~VsUnF_<~e3H zp9>tfB`!JIoRGC@ zvloh*j&kcZiFHSt+l*IRKw*CIRapSn&mUvvN7pj!J7asuh5q{p7=SlWvxRxOcLipT 
zAIgtRyO|8k&v5c-NPBgyd;>w#T<~u$;JFxc?x63yWG;=nugcjlF;+|JPI}!0vnTDO zi}-|{^f^Aa-Yq&Nn7xrLbD~+C-|-}DAiToDa~tJGj_ou)CMqW)GmNE|Of>r-^PLmT zzC|*0NgWsos2}m}d_|7tz;CFCL)|*j9L=T9_-}J^MDs4?FuqJa&g5o&?8@WJ->cZ0 zd+7vBNkqH3=p^%9JSLoM_Q&HY_p$I~^A5cAKgFDc$F8cZQnBC^vpaq-J;j`>?r0H} zr<#?AKZL;iU`?MtcMBh1u8@z{9_9mJ6;I4iohNaD0MeRqcK{x*#MYc@9c|9 zHT~6W1d2kCsA^42{G+)b04ujj@8wSM$sf&oqu;<{HwU}zA>SB9M*YLS`_~FiknoPj zyW#rsM5S^wps0mWR<&Q&AzP4{m%u7xeDhck1-?fmLRNw;KkwUKR>EsP=gLa_H&>=z z@71HRo{(Icz~HQ1WF8*vQn=2*64$E;(LU$1A_M}Hji3S4gWY@&T12Y@aJT{nms9Zx z+hMK0GT&>!iZ}4z))x&OxP z*_G1Pt@1(qRDQGJp1JpQ?0t6t&Oq@!n)0FH7a8jD?lb}Yb+=&u!!@SMCg4rsBa2q- zS1c|v582Xt>wc)4w|@D*&+%GHd6x~j?@^h7EtRWq03q4^#}*NX?%=^ch$zZ^UnwKp zvB@3Fv0+SG5|KnD;`XY}J1W9G;+A2IZImr0@kILi5%l#o8VIG0_I=D}V6^WeK0yK> z@`<6xW#Rb8;GYpxvP-`8Q}-$()t&r}%|piNOkn-{t^pP-3QSSoI&Y-isQ2YlU__d?kXPeqeY~xqm#=(whMz0 zOx`1Wz#c!-7e=~?%kPM@iuPdzN92)+F85$m_+K;ZBzuqJbN(5XD2bD3{%GY}J6dPl z^X5C#F2Oe$WEgOk^D1O!2+eRY#@|F#KYv@iY^%TW{W~7~;I`fO?7lOaQWpM6Zdo77 zvJ4ak79_d%^LNCHxBDxfTk!UyWm}$lV_TaqrF?$8yp8gW;^a%s!st3mJe)iM-LtDc z_kbM2f@1P>yzy*C^_6s1!A2sH$1#wzgan7*oGJysF;(i{uK#rH)~}vli$ex-R;GSH z7Y3-6sb7f1Ur>bCUZ%Z6bi2$9Mcu~RE$J_n00kn`%b07w%ezElrwI|bT8&|}nuz45_{jDqZAXltOiRiy)uS|o`g4GvP$L78+%p20G$ zV(VpQK~(mk#ZO!(XyK5gfExRSz+z

}|8?e~n-G(0LuKR^kY5_f_GN zb9cr$|1F9;5}3Nc#jz?whx{)%hQ6X)Zf5sM#VQVl9E??uV(nMNn9I#kQ7P;Z%uLGY zZk~+p?#XBzu#*}Et`(LtI>(dI>0Vr6$ZCX#1YdLVuNm!Q_-6dH@$(+{r-h#jFvL%| zvjp2ekff9tq5QF~77)#?991*cC!xr#9D<(`klf00{FM5F)CUr6xC)MJLqY^Y9NK{5 zkZ40nB{BvIjoOq1Dc)gxm5ocV^TzrxvGKtLLc;F?A>ns{0Q{)RLx|f^S3#0MNN8Ok zB(!-vvP)$AXFw$~qIn#Tq`XW>xOp8%qDS+|JCey1jVL%z>I{ds@m^cL2*^7 zkqv{CS6G%rSW^g6f`qb)GyW>ao)Gp!u;wzZh+BaotCZTN$B|JP#uDeob3K6)7ZiCo zP=waKz!PpYaroqlI9F`F!Yl}q&GUF01brMTe4?ny{f%*Y>Va>oUt1S@??)(vn0IPr z>IXSesUIX4X*x&)q_U8}2^9pX2ztWtLRUKS45#cfA;G0WhoqQ$*48}K^7Fqxy}$z` zhYXcN+Wcwk^6}We$+yjyqH-urOi{;{xdb*h~OS-+E?390?xZN+>QV z@aBVx+&>{CiRAcLa%x zEA)llcgY91YvS^O%J>36vH>jE;`#GZ^J61|`J-SQmAouC(Xfmb@2(g;eb7~Z5%(ot zQE6&L(qUOLph|k?C+fiwo;W}-5hTCE?>t@)e7B=ALH2+a8m|X>Y5|;5j(>*&nyd(? zf#VgC9k5|pkMskV9YA^>LWlY*y$OIQ)8NX|58UlwP6(4RQIIN*yH-jR^d_KNP`Z2{ zf`QfFx$nL$U%xs1_4vd{{U8Z4^+O6pVrg`ow>DzOn>nebLl105MNMP@{si@TQx#+f z>I17MiBhNoWOl%QW&YJ>ar6+t&F_5v_Rh^a-`^AmF7<;1F7?B~z`3x&j{ft{=2h~V z`rn?v1SslDLX=!Yn4C4f__~BIyajbUR@tqpqi`|dX^!x<2Nt9TELR(3)ZWTr2$`w7 zLLk3n{3)5lqp7HXYY(W_QSxs3q5fag7`Hy+9%VZcDouMrrD=BSBd-2~Ml-51u?&G@ zzo#2Dvm#lz)vbuU^MBJD+Y&`vS+o(xDk<=wa47NRmDC!6AH|WFf&CD^LkD5S)hQjE zaQbhN0lkrRc$jEPK+%}M1W!TyUv)d2pzsh;n?OYUArYZr=v)nYqNhPOB+Ax%m;i`a z*J4d2sU9>I;OZ0}dF~@kJ|H?s2i01v+coCty7Gaz_+RGlaH#01f0;MtePB^IDidJy z1`mXT34qD5KNP22Zx$#YiA%3HFT^?B7q2%*mfpuWbpGQIE*6i9a1NI}@N>js#CR4* z-pXz;L%43b;s$eAv7414KzR)Uq%xQZbbzW*Au`Us!7M?>CvGsyut)pS4QObU`1l6c z{GJq8)uoPD&NJn&oYPll_%HKn(&reCfeqhVLBAp4R^I%-YJxDjqGxG}{+k;Ta&{%<7Ev{T@zK$(}Me&d1ZaQ&yVtGkk$_eLfQg}6r;zsj(bS!&?`H!T>4uhS+8S0)D=IOM`cdo#8 z1)kSGX?}dDOnpS%O4ojmoY#nPt4!(9b^0pvvTlpMz+m>E-y-S+hn+08uQKyfok`*8 zHeKAZ+Wd#Oe6<-oR7$(L=!@8;Ys}9zxC&2MYYtR8V>hoguUC0*l=3VlquO{!?9AuP zzxff3`Nhp zvzNNRKDM#NyjnT&%?WCeYcQ@&b1#7GfOjU?qt98YRYedx)q#C0Lf6eUe*~v#tw<3L zy+`r}`BGdneIN);-W{N%3Gt#iJR_SYZ>@;VbNZTSd(kY(f~ts}HgK0tTD``5jYPg&t)aNvFsrF;3a49 z%6M<9*~dRL!A<1B{iL%`cM**B2adoYBED!t$`hyJ&)FnZs z8IxdiBFX(JqmxET5a=J25W=$!5iS`=hke@PcV6}}W~}OTp@bCmSbz%jeXufJlR>5+ 
zy&$kDqKJzkuLa81Eck;{dlL{c(<*DKtV9Y9|-3dvi2X2kSrBobR zRKym-MK=I`X&|CQvVk+04wYN5#&j6W0555nt3XL2!O2{)%SQe5)Y1p~z)eoU`rYJf zj}T6%`DUD>0}>qM+oSUpTWt3IBK!($oj5M;rfiZapko{v z;I&QONd!P}Fb%ZC}geL`T z3*T`#ZJYPr#didfI>0AltyQB1&LiBri9Hj!jfZ3CYAA}J__y;1$P(*?+whYNu7=Ne zKoE5)D{YiMY+N_73P~e4(!>;8@KG6Z4s3zS#eF75g<9n#Ad0h^{$6YDTXSai_bL>uq zZS9O#*oJ&q?*cevV_()G;0D40VUSx*`2sQVkc4knyv#2$BWs(R6~n~h&H+RpjL$uZ z#(K=uI2`MjW8+)*;8=pmnh^y^PD{#UJ{XRA{a`q@u?&m+8jcn(YqRe+0O^#iu>Oro z$g&@i9gFVuYQ)?&Fk+NDyj=zqWN=PH*na~->764XDYR18?tkvv|z}}WL;{U-MK(Zo~ME5`QaS#pO~KW z5nQ}nR)`aRAPNe7b{G()-~m{WJz{JurAmB?`^%2U5KGLMSd!w3TfEf6eOICbztMRt zSloWQNm6>GIapwpvmuTRBEsP!G;zl3=74ZqZi8(bah~ULLzi3(vG{ef*l&O_HotCW zM-Q4^F13<%bIgH#F6wY5pH1wA7LS<@4ld1k#H@4=mV0~o_FJ-NyqODmv*no+f(MF7 ztwweazDcAI?9ZB^ak$&FiaLx(n3#g8=lBtF11yUQjMD*KfHB6^3aB1n;uvy-!N04` z%!|4?A_kXEChJ3}DvCfJ>m*5a_$J7Ba%{a02T=H&Ye5|LfZ`=p65OLp0x?59AOXl| zsdeByS$r^UNM!?cBgQeg(_p2J)DJm;ir^Ii(T#mv9ar&n7sK8#`&ya|TI0U;DkYgE zz+!JP`Y=AZ2IuZ#$s1;GuMfDa%n!j1c~LTsi;P@pRRp7$6%wF9xLzx&P&Ws3@ zXT!0FKylbJRPC=#$DUM7y8;#v3Tf zg_B2_A){HO8S}x@f}7)2)dMETdMR*|>=_aJE&DOUJBbG%-O1zu`Z!`t0Z_slNB}IR z?J^54$gq)!F~JrwucMcQEgSa&xnD|Jt+8C?WV!|A#GUO>)=A}{?Jf)idFG+fB7WEf zhpeR{^ftJMn}u*dqk=hyq1@fZ(BYHNFX$@ANo1z5Y!6U#RwREu_ zQitZWN*O|h-(6*rc5R#6XM>ft#H8D%a~2O8?b}JM66AEy?X!sfz)^9%0bZfb zdXrY}YT&MZzqGfJ$tNB?qo~8hO;02)O0zScOeUk zkr>m^#KRT*>CO zQG=ge8`XiTNcVUf>+n6zs}sPa$r{87?Pj@O0o(jL_(e1j=gxZqRo+i7lo8Z{tVLbL z9=8UnoK4ANx+?SSWWqXplmNHjjguMoNFcJO1vtSxG_q`GAqsoEp0~&$6VWmkaGJPL zH7c)E#TPQaYO(}0I-Qih4hSNbp|4N9pH2BR zhT$Q82GtYcQ8}qlPyF6ZC=GHck5cx1@mM#Qr;7bZ${X9_oRA(79h_K<&BA+s4~obt z_h>LOSR)e#tdapkCl^)-Mj;!DUWK}4EnJ#%LF$B30$@VskdsUgIGALEldz!-q--F9 zGE9y5q1V(Qv<6>+T0W`~RnK*mi04He2jv3WQ@d3;FW?qe2rSy;HD^nmcyYN;1S((UDt1X72@-v-a!47?f)<`;$QbNR40a#vMnE!H zsYVxRBcV1)P5~DL$-fnMdMoIc{Dp&AUTMo-kw6RfBLn2Y0f;rIq70u2HHtbRdrZ{i zLcs@WxW|V}Y(FUx1C&Uzl;R7Ue{vr`wOozg>$g^f4GDnV7C;GB z0wu<)r2Zh>;96~VtXz#QjDN=$7|!St;Z=qY@PL54!T%=~g^Qt5<8%i?7%TER$ed8| 
z1Ww>|bd*YJ5bG9&A_jCHfLQ|t{UHdrZlE3D|7ygWPUnwnpw}5v$XAAL8yQ9S{hayw=K=;+C9y<&$$-hs4WB<9`56q^&CjS~ZblRb+Th=WP(G|F zmzGWZg1x~V9Ed&x0VXNSxCvs%2MIl#3Vp+(E^KihlV&0Xn>#>R4h!eFV~O=lAff;T z+?|CG4k_p!T=usCWmnTWn+2Y?Qf^yA1LfkJnkb^`;BXACHL_P>bT6eaf~0`C6xj8J z10dNzDCqimBjAiM~+x_!|e?7tetJ+rIKMQC`8f?iftG zx=J9}2qu#>^C! z2JpRAJo1Sdvf%uL(5cYFUjD@F-%Gw>o$g|ROMa@Xy3>pLmi-t_*t#hB)GR83Rx|_& zqnP02Uv6(zFy2z@NE4@hY7U(&tHN?kaxg4ETn2<;s)yb|n&YP>ByKLs)9gwPVYLxP zmmyAY(3j6Ho$FG`S1s}HPt8G5r3eJ$2GjMy@r7bQk|;&1v?q9p$Y&+}9z+%IjiUl*0rIgxk{uyKt9MkW#WEZr91ZfjtZgaAn)iLt6zY z;kU-$QhM4k!*!$ODj0lHlR*ll7b^Idz0}|~F-goNX|DL)ncfb20?J|m=aX={<0NfF zA6WL|(w0l{j7(lxOeFdVXe#-wUrg9z_VyNGToYk_N*7wWT%?JVF!=1H;*LGAGN*iC z5`iVyI39#;z~3xuNH1W9f`(R^huWTyn&buyg?ole&XiiB0!{$&a_N56AzA!@=I3ZEeX&#HXDSzrTzd=;7GruygP=ZVIu(7PV8_PSsHjh+}66xQV z!?PN0_rg+x(Kz+Di&MXWhrl#(K{cCkRE!f-{g8{9oRY=$3^2EtD?J384&_2w;DO z;UWzxK871g+!E=Jj2$6l^E+@g0Vm@QnSU^j23`5BSv05(#|ls_y(wDZHrLL5-wv6( zUH+ApO8YHj7_97Jkpf)S*r7;!h3RCAw^7NcdLr7jvqBfaMGsU#z@7H;31bsPFj~i? zWT#11?9Uzl>BLa6y~wtG5(p5_NJ= z8LN#`id&(FE*G*km@}|Sb3$<2hP_gfk2Ve)mG})jeZVI`Y{xU(H9{kEdjiHKV(mV& zr&2H8*=LrCf%{Bb?KweAel07ziWXhcPy@}NjgV*Tkm zV)0;U)W@lt_?v{pimGWg!m}BQnLAd~4Y6(?h@{#O?fcAOX*Fc5@a2<*bZ>(*d?a@VD^l*QNe&$5yr}oiSF+}Za&*tDi;{S{w2GFT{m;`sgM zh@-0M6%B%JW^b7t|a86udVz*8VjJC>-{tVpyOF_e7_fFVIBm;M$Y4de%D zuvDBhtW?mMe~_=ulA{3tYDpnbZ1ua^#lGUWe|V{uJ`e5PXK^@VBkVb*Q`>t zAt0(vt5n>3z)Tz4$huHxJD-V8!N!x)sdaqo^y+L3#5=WDyn6tn)EQu(U>lcx>IE*v z?A2-PQx^+)eVUb)+9&283X)tE^JQO^E|&d-!0Y=9 z)HK&?t20gf+)-MDptVL8x1zC_a*mRopwz6nTt-+?s1qR#p7a0$7^%PtCNnFTo`Gw4 zbi=d)X$*9Z9t>9O46=KA??EZGr(vK6D`!RI@ilMS#wG4A#JH5{1so-cTvs2bWR<_t zZK2%REoZY=M-{G)xKmP?Rl8K$6 zIjs31iV|#k1KC&0r6pELaq-69UKxR{{aT<&vwzSdlAgJkh4N6%;i{3+z8Zhis93KA z;~uQkx_YB0ljy}+uFm3JZFtro?l!r6nz&K1LTAZH0=(SQhsvllP@f^>kVA28rcQx! 
zLazf(rgC+mR8BBtScS@a0l_3Coi|E#0ba@)YX3m7hDAS;gtbeFs3?jlz>a~4n2&lM zNDQ_Yq}m?`J!M?xlcJQwCXw^5pT$60m}LNvg`qFgg9lyRqeKF-Ody_Vkgo$Y3-Chz ziT$7$!D~XcClLiD#w{MwJ>>|;On*@U&m;t~?1Dj#Xp!2IYW+z({hgU>wovIw&k%p` zTX}YZpW$RG`I(sRw+2crBD;=SgeSavajc;@h&TP#SX{HhRhti2rvc$mEEJe|VBpa_ zC8yyaB_~}Rqgls^mVIXKV7NY^N{UXB4`mXR2Zky)365OQDRyriaUQWbH$COW%T>)n##if9d{4KN8D} z5I2~_b3jb8tYdIB!aU0=vv5xWh^Jh)ooy#)WC^tksfW(@klKd5wpxYD+I;UL-*ms69m?;Iyj6A_Xph8R?l^ z9{@>UM@yc8W0pq8EFfB5%RwoORO%GY^A965YDOZ}=3J3WG2mHo9zQshn2t&=2y7#{ zZX5irf{DT*7A7Nd%9*oWF7160kp8GzM1Bv7cCUeF~!wN1WVarw``8XzXZb)rNaW*3mOG@QZm4T`OrIA>5 z-eIS+5~a^#X;@BtR28)l!`?!U(30hSadkm{gF6PuwOqYG+}y+JsXmW2&8bBtV(s4w zgQB&El@?$c)|>Vg(cZ%voYqpH1gK!vTB0D^Ivtbaui4fS$^&9awsns25du5etwPJO zdS^&iK8v_Xv#Z639II!h_X!Ffj^W%aF3GVX`7jy#;1P-Kdq4wh$`S#4tyr96mEr1? z4LR1}sCeim)azL%(qM?6red4kqV9oBservY13VhuE%us>u!$c5yFCbH1GfVyXbLrl zaO>_!b*=(|4=)K=yl|!CXx}ov^VaFnz9;w-#&!H$>ZLx;Cv@O3O0+?K!YZD*g7UJd z$M~rc!W?@k)Nt+nR>&HxtWsoDM9gkrp0tCn+Wg@a9}J!~7bbg&kf{o45s#5F79puxxv`dgXC1GNOI)isx( zg}I_zo|O}=g~ngUlU?=5!JKr{t>Yv^HT6=h0Km`&NtN&%v>(QrO0EK=_=U~G1v*~8o3@AeB;6e+Nw zi2!z3Mq;=-RdY##<7~R)%IKyko9cP%(opoyJIIm(S;qlzaC&qa@o+0C8Il3?L zH=MXI&@ckYwndf# zJ3~KZrSMgtYpRAwShy3?XLn)L1}h!7L|M`b6g-XG574^VN$)tCeXFGSz5b-0=w_pR zDGIg7iffd}pNT<&WVPf5h=ew!id3VQOC(U5{aB*}<*eeQtA;j;xsFQ7{_3RNU9I3h z)L93`HDG@PP3bDipLEUV6XOc4ZqW}wPRtK03iu1f5$6Mb)0W?q!O1dMnFT>gp$`$y zmTD~fU4M{WhA6@GLfvUVUNu&{@dj~*5dp*y!P=*QNH{Zvr28S?&L4 zCfoO$WYWo=?vu2oEp2JpcjzFca4*Ov$RZcGs6dNe@w!~s2~aU2LO|rHS~O~rf~|z* zngCV9YA6-s1+^+lxT1zVDnBfW@_&EMcbS=Fx`E!;>t9}EX1?ENJ?A{usXW)n7JXu+5)U()%dgeFYBx>S?)nEe{ zZw>cto>}A6;?>sTdK7o;UIKz#P4>DYnN!zoa5pu~l(?9~(9x%cx7v_c34tw+VZFw% zjNsx?slly+h1iI!a*Ts&8V{yC$sHrXE2|1uj-d_fTn61y_^e7tVyP#z&=nBPH3V94 zg&Ud|O{IJf=luYpM)*fjp)n`xeYTrd%*_YsBD|u4X4i8|&SV zr@>y<$#kcQ4A?~QXF{}}!X6=#*l2AU1PAC6dnF~MdJd_^u+6D&9J;`$KZW!VudcOkG5L2 z=AqkV8y#(!42lK<_ z;Z~ugfmB&dBLS7>L9~VnA!ycDi}{GC#S(x~uUG=cQ-cviZfL9yK{S9x*m}pr5K65< zYroh2T5(m5LBuJ7@fCEQz(6!uZ3IaFJqXC|86_uNls 
zdi!zX4{K&hPT~t1XfxuNCzy-pcwf?)m#+&t2N)s_7n28?R4I zO4bD~!d~~yjJE_O_GcM(f5UF@(sXMRx&S`^ijHO~NEjj=-OLVe(XkTL{K7d6-;-om zO&Hwd^hdQS=M$gY*%BN_Jq>_(x_sTF_f~5H*_~;*DLqAts;j)yclB+4o4d8ct6H%& zX>G#kC#9S^$ON6-G&5CznG+b3QFK~{_{lfzklQ8B0+EN-K zmpHy{t~;ue4evefW1ZfyorA`>7IHh1H8&3>+B5%ia3H5S&Bbt?-mJtUE;Y+r#C=}} z&+;Z8#Jr&$5|L@VzLeljVsKz>a@->AembKN(wWo1c&GKfYFY!B?GUax& zdK0e4VR!E=Z|a=kvf}|{W^noeB&FT$k=L4S+fN z>6>ddoMdm7u8caIl<{2eZ0}-#(KyHZ_r#^{gLAwC5?8oy&*Ajkjc#lXME+}+-py;n zVLfj*?^vMs!QH$=k4LKxZ->H6L2sDbUn7h@Ft_W0+ z=>-|>3s=ejmi>|ARKPyTE0gZfxn5hY8>$8U9crhC>1!*`*KTyiD{4e_*Xe320p~&d z2fchT`hmLN~erj_k=9rGCS=lZdL?4PW< zFV#^|mvO@jy~*yT-M!4g_JI6}3ciNNgy$%jar*ug4^ggzzljVPaRu_gU>cXyJ?!AB&bh)z^c*l^u{pJPUCrN@oZz14%kYNx)|ByRj zp?4_nu3Ct#=sx$zLhyc{OD%$v+~*EkPym5X@z1NZ16Z|*c3Eh&~N%@bcd-2R3= zSXcCL_+oE=K0dtI+cV!$-(qjyeY?1>E`47rv{I+qZ9Y|ma@p~O{9{0=+i|lx&(Y$p zS>(05y_R_MtAhIO+$G+}6F0kGFYz2#f_-RDZ)5PW#GSX7cR|Y#9e}brMoHw1MUZOi zzn6Co#j<;2D*J&uYj5v-ns{(;40iXq7xre0az4y}Cs(rmVeB82c)Mnb5z)K{Gip~IX*bvAnB&Y*BGu-c%Jwfk8k8+oqb@8=y7y!L|EZ`t2opSeF% zzRzvk-w=QB0eI6NaPK(4n^7C#KUG&1)cE27R^zq}!wyPiWK<{!#(QVB5+911aSa<3f9us6BA5A(8BR)2=O?I<9B)nUwWll$gj z-f_Y6X+l_&t2^AA&vh639q#QV~_mS?eR#?&U(jZ44<1)B$_lYs8ymRX;n@ z+rr5DmKnHST4wOBISM#!a(f<1cFT_Su5V`PqT?yjR$&i1&rB-qmyX4P&C)&Xt<3#v{U3X)H&o7Wk37ly6Kx)F zvUdU>KYTl33*R`|YvaC&>reCcmC?BC9o_;r<87dI$es8$uet3;8OqGuteqiu4f0nP ze@r^dJKWv#Ht!S)Hk<-5zvjMwmUpPT@D#5h`Gb^8pXD9q?mor)N#ZfrbEXL#2!)z6*jbw_ht z@{aLyEZNU>MN@feIE$t3_r3ua-hf5`Qp?ac`0QyI+X#k0LVrj4WB3V<~Puv$p8_u+G3{{Q1{JjdIcwe<6IypzO-6%8SJS1GirGWhV)b5X)> z>3`?B-iH%yoZ$*q2jzlNEb#PC-5>wmI}pNLcphLQlgfEe^rP;=^St8|KXrGV=Y5^Y zz2luw#ZTRX@AM8z{Pg;F!N4DKZ+VyZ-NcnH@ow+!++%tAyS?QzUcQb{;F|Q;;`?89 zND{e>YEsQqzV~i#=7MKc#xE+9og=AF)D(k`*;4kKpLgE*-ooJC%-_3%&iD3c9*u4t zHM*kN8ba6%{1s=C1u6DmWpM$@5Zgn#*@TLRD zLoV=0p1#$+{Q?BkEiQL~_W`nlzkGrB0Evd~y3qR!iH6>?!n-%|OV{uoua%TS(^h(W zlIiEIE4@AV_{2)@zLwvmWE-zC)=9MKYVNE_-{IbN5%T;_cl|})N73r6_V3-i_fY%4 z-5KveWB70Pk@tB2wfOhk41i{;JM48zl+BIQywOy-McRuaDwrCj$DTjqGVjF~HR29^ 
zulI%N@bL7rpg)uh>{wKYKTB{RxtHGSO<(YQ{xyfo@^4Wp6t;M#f>@<&Rd9#p{{O-B zpL6T~!@FYsiz)EO*@_1e030Vk`lWnBc>VJYxwGHr&1-)yZqGoGAH-MR=Pj&Netp{? zq`4IKeclnRT`yh{Tnpf%V+RJts`M}29v6EjExDr#@q>`66I8W_<4vr~Aet0U!@4Rm%9uv-tDgA{odJV)|b5Bdk+O(dOtqWU%6*KfdA>N z|McF?n+N{Wd$Q=w=Xv;v4|*^0@I4>0;* zxp_o;M@(!s?&!^(AMuuf>A!r$`vxDEe9~*;4ho&}1Eq?NL!>6tPo>D5zsJW>YDPZl zeK0KNF8r9+n0U&4{A1qL(W99k_iA~x;N#wTJi7YhEZhHf-}<<>Dkv}~@l^k*pYV3) z(nfWqpErFHzj5%hsjiM)4P6qqn|`YQPdV?FMBQtIkcPKBw7CDh#5cHCPbd$N;e0J%9VXU{!bkdHJVC4SXXelut+Ja(9UaOvV#g4e&E5Y0?p}%q6(s; zD*5JI`Q@zCpSUTPdkgpGbbEO3v8Y(sdL*vE6a3K(5w?ddo77YCWc=KH`+c^!Tg11wz z@aFAhPrGc_V^8IQslZ;H19PkX<!me) zYVTO?_`djRE*618ue+Lc#|*xEwKu(mfFg$7Y5d2$DMqX>UG25E6u^YV5=`L-Zd#9b zE{%P%$6KPt`g**U&VrJ*j0%3+vd{|df!S_!scX>e^RK~|3?0}HZ>S(D4OjRuC$cQq zGRqF~3+$CXc#U^RF1510HCbK}MmW}$#s9=H2?YjTiYu3oNEAPq(0Q?y_1>gZDwR$( zHd0Di>Fdku>eAX$WOb)axbQ6s67!8rFAAARUvw~w!A=_hQhPc%er$&fqQ^XS=U(fz zO(jAwkscz>&mt?z6igbm<~~P(eJD^K7SQ&kumGGasHq*RkQmqeQCPDoECAk%3S_Ip z7e>s5eKvgFYd+HG9K^v%v1ult-g(Ikspr6mtyjJxJ@|TE!V=OIgGFE+PPhv{?|rE1 zAxYrx_y6|u-j|e4vFi)o#mUvT_5bM$NL4uEe>t{PY(|~;liZe9xHUS+paw4EA6zoU z*nvWK{D|LO@ed_ow)d;!NBr)Ie<(GwyG zTWh>`0fvSfym?Ifpc}jk`B-~{x7)07Ek>Zw?w!~^sQj-L?u8q?6Wr3hCROChIurGW zrMK~~!Tu2t8xwtF@UN+Ss-Yu9I0Y*Qe3+bh?xr2=#5#rnO9;-rCkaW5&#x zGn*%8_}9_VF@@W~hORbwN790hT2c|mzX-#)F8;yoW2@sITt4E1Mv^89eX(kMyhHgBaVOK`@f)sW%J)l7wmcxoaXXTW%aMCMKUfZ7#c+VAaY4f4?w^}%47@AD zWPF4_!sld}#?Vd1N5#uE6Bqhd!skxR*00D;x;R^LjKt8qnoq1?OIO>8 zF6$P2-CJBkH)opW-XV#PrE?6Oo;&MbDv#{F=d*2YOOa@;ep8;y#)kd?r|g9B2j`XZuGu%XfQAx z%2C9vG6nE$1pL*dw?+Zen4Id^tG@oaTq{`)IEq7afUC}Bze(_4tz~?X7OeD+vda&a zT}(19o!QQoe@IRq_QGrqI}jBeWP1h9OOs2rQ_j?(ndpIDW)k+&3txJfI(c-&Vud5! 
z#TBIdt5<=H((sh&1~$NknIxyOXjdHN*HH@w!(T#Ef+&`%^sB^CxW9fAi}kA#BRcd` zP)cPn!cfeec#}7~DM+Ga)LbJy2~IgFVIKR6>>=*So4oxGvySEGFYb69PolOql7%RUR%BF{TFxJI&Vq??w80UFX%yQ@4J~;+&`|v5c{gz{bp~#aH-n+RVZ}DbV;-5+X z&8@lxk@Tv&^%g7kGZp*0tNNCgnOc>8J)$WY#8Cu--my%$?6*+Hs@(bC@}|$PH(d4t zgQ989OJ1NQjB9~Y(*aU8e9N2B93iFG>m>WM#q6(lPksxV&8x0qJEj;YMcJ{9 zD_C5IRf#~pOg)&BuqK^=auJ}%Do6sN#VC>CVI}-RQjNnsb0ED2LOmc_=+P3Il|jW2 z0QWzNP&2jqlUM=8Q&gD>=2buozFSYeP|r?Ui<%x^@9kADVkO&8r{WPd-sYM&c%4&N z?xTv|R_YwM30zb_Km~l&ov?vHUFxpgfH9FB;|6c~?j-kMGgqsT%kRXB@g#Tu9o~ZdR0lFkKa^CsYxs?4ezrP@3&ea8a#Gfa8f5cS>HhR>@33}l z3OU=_oP8O=BaRN}%CgU2?T)$C+kIv|1JnWH3fy-UEG-2_=@_9Rr_x<@?%G?u^O`TI zo1&o()+mUdB=j_1<@`Qx+LWsFrFBV|0*N*&ji69uue4^|;eB3v#(=Sf<6Y%Ch6vV- z(aQU|UZtwshx@RbR=T_Uyw}SI{?hF>-sbIl!ef8w){W`!3EUP2rqwlC_^hBBAc!i< z?Ow{Zz?0W1U`@1(;pB)LySlpGrwQVY$yQvPEn9w288PH8dpm~%D&2Xvdvm7n;zNeP zkP-owe5iug*KhYuY$NPRZb2A}sG<*+B|F=XT@8q+Wj-8r#>I45om+YbG~&DW-r>!h zgvl!X7*dAqNLhN7y_;NbtGCKu$7@7=`geX7o#emA!BSKW{A_O73KISKX@SYC^iMsPLB zq7tzB>g*G*Uhgiy2mAPg{rBF3wJ3T0tNl-I#G`=b{DJSmEg$J$|2^;ZMDlAdyA6YI ziu+!6za7MC^y&UZ_j*?)xH{E+_dZr}zPT!71rd3(P8jxZL*i}95qz7;myy`B0zyCZUJH)d-8{i2&4_a>zKgbMTb-#NMNL}d;e#kqAV+Cs-!hAW~E&T!Y zu5#!9z&mKh)piVy;H`?3PEFugok!|*D76a*!`I#Sf8cdYA*s4HgC}P^xEcxzF*&dl zLO@?|fB6BS;5}~1kax}Q9hO^6b%(spstx-?eotr5C3G3=do4kF?U*b#kBaHi5cfljx34x^3fl}aR3`^c>M#I_7 zYu)3Ic&9Sm1)IHhCO+?aHe&$4+g1M1Kyla)Is9;)``iyP2ma0d?1v_d?BN&OiI0Mm z*WCvm_0B72S-M|5>dnmOB`SioCcMB)SR&9!_qS|kK!u?>2cTme}8A~^@ zdFIpw63q^%T<%(b?p>ZZwErtV$9kNYSVtkw3c^p7Q2bjPl&w_FJ#Df64!P zzfLqx9F5C7?X@&+`4pljG7n%582OZ2{wSuT`#qbykNsLuNRLz{(YO{TU`fJ}wq_X&W@Vj;^iz?JZ*D#|kgO(2 za&!aYGyA{U;(rsC@U#B6w)rNc_Wdll?`M|MK$0VE>EL{ke&mJ80eQGt)n-qI?f+}=W^wseQ=%3vm=lbVNT37h8_s{O!dHw-MPHZV^!E(DqC;5lO zV+uMP=6BE$QpBAw-(TRo`Tk5awzD&Si#v3_U&HsK=lh3FCR`lj-x@sh7GSPpN0tM` zcVC&$#AmsC=Cjm@Gu_=k{mfaG`omHPzozpTfrEaGuh~A?WcXxHtc(t{d1q1b4h33? 
z3rLDr?3?Vf%phi$wrm8xyKi@Yc5Xni4uj;(CGs~rpeEDLGC({(>{UUEE|uV;f}Im4 z{4XRVt*Vym-YVO)E8;=tBvlYK7?6Nb9i6N*drtBRnwM{Ei#|DjuFRWuP68L4I{Vjb z85-2@Sr@DfrZ8Gz$JJ}Hk0!JQWB)4XU_GbvXOZJHrj4c<{B0UsB1llA20zPnFYr%3 zHamzC9kx!>zcFZ&^1RA6&vr~6`#Q}HS-L^yHdsl(k5rogxz=JA)oAhPh5kiTNTipk zK4xl%9aEEGD6u?E(s3Bfd?osN*L)BhZ}s8F&!B3|)(`mSTP-<;|8?L1L62#j)ctLrO#!0)59ySo?r zGY${@SOe9HeX>a?cogdq|C0p~ek+<{7&xoQ9|&^TF$I{`SVN|8;1Yjkhe4qiC`j%y zL~%^^q?{EK-4kK#KC;BWa0{e>Xly?gp+!iYBQ$ti0%R{3839(9EXaEI>Y@1Cf07wqL9 zT>C$jwq_wvt@G2k}snqBSQ{xP{`ZbhNPG$xAj?2C3hvLf3gJGTIz|4ujUOlu^Az8!@YL8S`T( zxbN=m&)dB|Q?a2l^>Hr0Jc14XCobcAM}4p;ScS?>xk1*Q`i_*Fw2!|(#JPMQe|q_( zj(T_gKK|L$m90t+<7E29@=k9bg932ekf7-3> z#3LP*pQQ4(rAy$-$fWvD+t=SWQMa)^SrwS32a|5n(bGv^a*dZ(z#QB0L*3^6VC(q* zrtI&34FBKt`%~vyw|RemI^M(n=l1ufBqrzmfx*#Z%S>T@$QcJB-8Qb2}6KXCqo`3&9n&b1=2l^dyN+u5S7vZm2e2~9qRrU|yv)cXJL4JEKE!4@-l>Iee zP6Vjz1P#*43~^X?NPZzs$*RsatXN!D)>$Lek=~!!b(O5N?R>3JjE&{s#1pbd!aqm! zNuIW`M3S&BD43vMda2Bg2&JI8!mT{mpVd%2{shUp+7I>*Kd?*gTnkaiDw@G6z^?vY<&LLm#NC z8dlf~cO%71&Cc}D%eDDl_BzDxY?MKrbqBbVd!_ut`EdeO>`oTjVOGR~*2F!tgSLFG zt?tm$KnejO@R<8g_pAN=>U~0IbR6?xlBEE0nD+_?WDWLz`%wSFL~`vWH~DaMru$sQ zQh)Igm$lR$8l*5-x}r0QKPHz=(9s+YvT;qRI>i}{0A;_hBwNox2%aLUmkBjXSKqrMXPe@N7%ySi==U74MfUs1ehz zwXA?MDw^%#~|> z*O9h0EcP&pZ>I9p&h%7#6@1I^r82`kM#rlvOGTDdSCoW^HAwcS~GPE){ zVqM0y@o*I?PK}*lj~h>ZlA)>t*dBW>`eZbnb-GPlt!Jw8?Pn@Kjv&d^6?NK4MByjY zA*DI#kSTsk^>~EUFl7YoCnVr}@dZgY>SgMVnaYH2szpkW4ObWMv4)k#f_%LDC1f?d2Ohoqj`WYKS6q-!3Fv~2i>q7aUzGU0`^YlbaI?F089Sy?_v2-L=erbF z3B4-f@fjLjoVdWwE^zFxj5{tYq&~?d&5O!*7vUuB#`3WI(?EG~;(bNslTrks{`ZSZ z>Ezb}>TGqV9pxYJuf*mkMdz^ci45r$t)ZqB*~RIPX65cuD%V8k?)g(+$;<+jQx#j? 
z2=Qizn^t6Y*Qqc1>~+69%3pBd>c)=UdBY4jyPwSLPWe(@{PcBBdo9*!FV-SPc1y{+ zCD8%n77UZ3p8!rgKY@%8#dOT!JEiKv_V-QlTbhDaVcKVxPY|Eio z^5N2CO^iq$Ko&fR)eU%pDIR23NE#aya@Hv{z_2rz=ydNt+Hc!yGw(C406sxl(Q;M; zowO-zfGbU^6p;)%XqhNomIwn&KkM!}+Fx83K7@98^2*VEr#*p4gEF&^@s|-Gz2X@E z#N2QLSz@zgS&f8h+z|epID1)^JIK-d!ygq{4EfQIsw_vAq8}8@{~+sf{zpX?$3s-C zvak+*fap!GEL&Mv4&PDq3Q{hvR4{~OVI2Zb3+oj0N|0gUn}P<)!#6ppW6(=BfP+Rh z4h~UC1IoCu#?#PP3tBIlx?mf_p$L%7f^>8dh>E73C~S__^Tm>dZ;Ak3G}{P$wo%#k z{D6Pakc!Y*G^e6z6al4ZL`7pQ!c5WdqJa~8P(W+qLwRJBAj)z57GO7;ueDdyZ_$W~ z=Eook;2%+?)b;HSqNuC!4dxLQwY%d?1NrF~=brCSP-f>5bj^j3d<&ek`+QQU`}=vY!;D^}Xt z4(q_OvRfL&6-VqZ?3?dYOFv&LF=^k?DeJ8X(-L*PO5B80ji8KqZ871)Nz^v(7zDKj zvgNJ*p5B&*fC;&VcsU>;qb8rNq{3VmuD8NA_qi+wA}2d zjd&~`cU`6o1xo`RH-%yM+vEL1PkoI>Bu=)fF#^`RxHkO=yP`Y{<)o$4>i3^ryGzy@VjCXp5`TByAxn+$~fYIokL{=b~G#b%bNSb;vh zU-Ai!nJ9mzlBpu@M@tImZea#!l@SGx5iv#aPQnnNYPi@+{gUGm2mTB0^;7+${NBdS z`I^dF_tv-jv*+L0*pboCzQ&FYeXeip=+x&xV~43oYZ^Od>Gx{)<+u9}z(dYC&7ZMf zL`~e!m#JO}s;Z5er0&^*2u6wqpkX2k0uJErJk38RJ=(}@8?s#$(!xc*vrosI_6N7@ zbpM1IS{Moh%TKaOkTLwBbKG&dzkBT{lMYDjk8bpIf94WZQe;C=OvsIEh_`S-ifu;_ z;J~CY{Rg-I8U9lDoiqGdt$79p*}}l;GWB4=jh^ApKXowf=TE^x$xH^fD4`kLj;r;F zlG)Qlf5~hQ3Ms^Hf3-eehBy0}$tyd`nCv`Lu;zx-U3{kB+VTh9S8*k8d8U#4<64nr zjO&auu`s{jK76J>3zhuKXZi=Uz93pF-vA2XIetLp<)iMIGyNH@e+b_Njl}P!zXLUF z$es5Nf1g=fG)#6DkL#0Y26X7%gb=L<+dKf&yWim-58>CG<$s=PUpdQfKP}|Ri3~wi znet<_Yv+QNuo;$@sp2T%#`Coft^I-*At9vySlbCJS_#*|f(QxL|C@ikiJ2P1SB&OJ z3IshS*j%bd8nlK95h?C5dO^?8i>Sx{M`W8|Yvh255YQ5hvl1T;poGV*>~OQr_78Ml zK6~5ff8=a`{=tRlM{Z}D;ZK4h&L3@9g#5t(`GW!4^T>Cvp6%~>MKfPI{NWUEs3BoP4FIM8n7t0xcQm2rlWdt=cK*#mJ%kR8!>B@q~WV;*R#uC9w z#!MM966x==b(ioc8 zgHg$jopH`w0{!Y>XJ=dREl z*nH|r0doWeM-s3BWvZ2=IlZ#JqCAQ7E*>U`Zr-NwuBO z)b=E(bCK2A{Q~7wdY|N`(jSvg0J299kxeG3jzQVbYvfSk|J3 z%)Uq^%j?oPM`T)^U6-SKW*=V%&-rkBGMIm&zPU4@oFssAZX)xU^jo=7sxz}O25TUr zHV5EiRdq7!ODLYIH)^TSI=2V6eQ) zwHyU==O(Amc3rn7Cb_R(l1{mvZ?{*v^!&-4)zxPGuXWE|=yUbEQnc0Th!R!9;b~x9 zVar>s<-H19Rt<#?Q)31W3t^ZuTdVjK{=4*BneJ4jEAV#$=gmdA^!DbZqMwFxV4u{4 
z42c@bG%-vKOQy%Hwpd!cy=s%{N*;?^4(r+efJtXZs1ZFLgc`YhsysVGDP|%dT#Ikd zteVy^^^FMli`6>)Wo(ldFUzIo@_SU`G_E#SMTZ1Mhv(_4f^|%peE)*92p3dWD~%-9 zX`9apO7v1UFAuGP@#6b6><09R^)$VP3&t5O=nE&RA%sH-ru#eQg>n>Blnbvi`vj*) z)30iXKvgNBvp-B2#}TwGT0WOuBSbdW7tq_8y5hy z#ZwsJYqm!XvFiCc(q>+3EjzUn&b6lvB~~T3aH$TAoNv7p1bIm<$YMecudGs`)vYTC zNI8YKUP{RBXKK6SivH*X^(`{ zrZtYV0MB5msaYjvnv2*&3PDmdRZVYH!yhf4-Ym)m)04NI>rNtYpsj9I`7WH<0Zz7R zUUuT4^z2A*$PMR`ZMcgo0#en%1(07A6P<2r=Sg?LlLzd{8g6kfE8DM(ISbicxD3|8 zL_kj27pwLwyVt%iDqCIES$9yGaXR${)mv>Hhag`)^S{BQ<7SE}ATu0G$ci_HmrVzK zBnf3fzzvI4aTJ2T0c`GqJQtOX5Rwp*^_D70Q!ZhFu!3^-SMaD!Uyi1$NDUK~xnA)Y z3LAf2r@JGwZTOL9L$vx2n}*JACd(3YwUEa&>X_ceS-%pB)iKL2DISvDI-wBkm;SgA z%?jDd39=}PPa5rqOxj9yS)xEO4n@L~&dV@7M7)d*5^aD%wlK&}>&-~asK3+C+VzEg zwn2QFnJm`|%8u3As7vvMT!Q02;}}D>i)#deRfoJJqbuplw8V#a9B?rncGa;(t0f;L zFcNhIxE)o^a7q*!s#8)$SjSjhCr(&l*Rm=an4VDCw@C{(Y0kw=Cg>j5r?zx_W=AL^ zc#E!w>o{I+wDL5q8QDgS8J`;AY0$MhyrpGEj!sMuCHP&`ZBXB8?LjxR70Ldsx_BiZ z{;rf}7%bXPF%OOuq?T!AGG9_3&P$%KT)(p2lq)8KAE+}h+?`TmOQ_2kM=NFcjg_1p zd%rlOE~Buh!+?R+uB@*tFXK2gyGc62#!}qTR}BJJPLkANw}wbQD#g}15Mj)b`#ml3AFbtbWKj9G@j1r$Qxpn4)1sVm@{tLvWysf?U0UBOr-l{W-V{nY` z*@5~1#bc>RRBx;=q#)r|6>PWYBar<+@fc`jMBR*r-HZg?H-OIJu5J-n zB*_`h7_+evgrGLLzO|?^t>I|ETkB7X2qD{LNGzsEXVWV50Wn)}RoJ5QAi{rL$G3nJMu^tCg^fkysY# zWeW!!jPK(~Z50n5YYF;ffIY3W)kRx~7yCP=&e8kQ*Dz$3njMayi>}rVac=hGnyR)4 ze^q=LN+!_<0a{0M#~y%%$(I#(uD{T1SUWhvo24Xz_yt+lYHYD^OqYLAYt4om0YI|*j{6MHRYjYd2(80W7 zJ)p0=*~sVo>{uRyQrpuv)XT?*N;ebHkol`a zNSrW2ptLEPNzz=V_0@u_Y1?pZAp?TR&NiRAxs$~gS3?bfU;R`Qy@7G1OYNi7^S6uk z=g74VU5&IeDCYDpY|Pp_B*8G&^r{yL>QcSEd^epM2)&GL`<^Y?z4{zc{?qO#9xrmTLZ!!jHd zqUe|f0tp$;o^EqbecW$K7GOcQA{ijq(g2PC8RbI8oSR6YvoM>);4tTiOgD_5t{M{D z54J2dlcGcP#yW3}q6=6m_N2Ep*6;%SaCapA3!husH6+_?G5sw$En&XcI%9p2`{AR! 
z4#;9li+t=HBeY3uX@L)k0$7g9q{}y8#JR`v^c^+%B4e%D1S<)6Q7oZiLUiYD5rIg8 zfN7(S-ZoRwMIO6_(gPdB(5|j0BB~A}`jEY2PF4Zqzz@G-w3r{y4NISMD^Vk(xy(^?KQ#3D!9wdQP9>_(9c z7wl=?yK(KJXlKsEy|w5?Toi069GuqHXvK>%*rf&xV_#NGfPIPqZS^iLfr4bj&dOC- zeplDh%K*1g8KXzfGmglz$-V}5Bp>-&irOI_Q9G=6OvA)J!r61oF^v;vA4w-Q7bGFb*Ifn{2-5@h7nN8a@}xgR-k}}|mY&HgxZouseQjH#)uAE-P-J$XWkt5hioi%a3DPYpGNd9y z6!CLbW-DcIQl~Eo7nz#b!e!{Tf}p`@1;MT26{N)`=_^tcvoL~;7tm@gAd1W{?}-=C zI$?$d)T>XkuXI*oYcmMoz=k&Vpb@Vio{GqeK=LEii|;3zVnn%( zIx&oLzfz^9d}oX~yNyaQ)c&YCe!dQj!s_$~8Cp{XgA;quD$f2*b>fvu3G^I9{(1TW zk14O4vJETK^bie0rcFE`lZIBP>cP+M>!< zu4V{;uq$@N;$aJt#{7l6=_y!*FkPMu2I5J!B;k6X4cjmyDi+z2N_{!WRKCQ#d#X{M z7FVEBMI_uu^j6H@rUW8hZ6n@e+6%e2=?;Nx7X?Pl+hn^jZ$x{@ynC&Gh%YbO1u=1L zaqoCkuMQyPr$(sNV#|*(;@&(J^os_1{aw_XQcw@9hB^YeLV4X}E~}{41xLmDX0!B7 zDF}q1qA<3vAcPK0ymbZXJLuv7f?fGIq@w|MdJTcFsf=yAt21e=29b6;&|>;KOCD7` zRyT11EGrdq^ z&@3Yw(@U)!AvZfiZ4{}G;lRmDEPj=Pg;6w5X9YTBmDaim%ZSc2!eTOm=xy9u1+9iD zjr{(4&P}N+kZTP2XD1&0Nn_u=NDJdn8_FBPeR++Hc)>$hgLMf*X|~ z<;dB9_pUukxo#S_=uqk%poD{cQ-&Nv5c|V6^lzm!N41!j+ze~wdj)HU(=XzalP!_d z@9>`~>3mY58eBRNcz{NrQcjD7;oH?VoX)p^xK-_$HlEL0k=@4}d9q90TCS1m8m4tZ zIuQ><(Ddpn71rnz#v*nD^}0Jh<Z4r!6vKaEe%_A%vhaigs% zL;&KPEoxL%MVMp7!h%XxQ;QU+saSf1tP2VCV>^42*o12L3_Vq%it@S{d4lbY6rEmU zps(umfEcG~9mxxWRtRUtJ!vbUw64n40@c|o0Zt^0gT*Tu-7OTtd9@rx6FViQ#ZU6g zHWbq`x)m{7!k0|{#3VPB3U69Wk3~DNh(JRQE3DIe(N1iPomk9>19h~U^`oHPh|j8s z5vBGw2z;qB;($fmHnNi&m$kh~pimaDS=*}bfX%X}HcJqn@!ez6Udb{H&8KVvQ2E}ZT6)q#q+yGbz;@Qm@6uPF*8#EnzaA-UBearlu9$# zxFuKk({sH=h8hJiR;Jg5rkY-}#q2@F?4xA?i&_^J*N9}dS{+)2*F{Am!{t4`T*5NE zWcx~VadX(k{(V+)k;r$TggI!yUN4Ldh7=11|N0fUHUqO$*i|R*2Dj7IV19^0C3+f) z2I6O1m#1JLOt3_25p0Gjes)>ifMCW(xHKuy-Bolz=x2QmuodU5|d$w zC1lU~03;Htcj`gVjz?T+pOPf=d%ZqkzH9Y~l|_FCUT?U^7|*;SO))Yf z1)8G7acD+rq|Z0UyqNW%>G*sZmUJD4Sye4N+V~5RHd^>95%@ zE{IgN1|}3wBen?fv}i7SqC#eGRmIqhqyTZVl|`%z%|h8E&^0DxO_ROvS0kI`AcuP&j@ z#fd@kFe76_U!7=JKpIHzUp@qBWrwW+b_*oJmx*X`;_-B6!YbI!qFRduj+5auq*}MT z)L6y;Sbk?F7&am<*;PYdLPJ_U5lXV^?RB)Mo=+Eg;Z=b9H`7uBRBgnT%b13?HEeZ^ 
z&tOIm+Kjg5SLMmL0yU*YWq+~DuB#T6dCN3-vvCbi)({qI2-gw(9;~wLm3%d=#;BlM znB~nXeWLbsH59G0`8&PN+{NFU-0BX$#;?r1x#b0EP9V`LTU}j@+$gNTQdP9NI;~i) zn`2XjR@!)ZiQk0FD+#=}ts9iwf$fZHP?9%%Nv6}PXo1P?7|$eHE2+>iSu~xmt6pcm zUbMa_@dVGg)VJ|ZxuD|_+_+jXZ{3TBs6+R0GB5az-8q`hEsdU;8Z$TX*d8YA#3L0*LoD~4K-lk z7q`_ZAb5!V)hKGyf3=B?7NH}mHbOOy=D;4TORyND=CvZ!P@#}>ZOs{OKtiED&}G5X z0e6WwS68FvF1BI^QwoAd8w)vCz;}8YBhJ;`Xo6nywU<_*8T%X0NG|MkjfDzDG$l?` zVBYPcrp&wp`b5%k!UMQk8w2)ttN=E{;~ENOr^3ynNaJny+~# zNN~9^K@Nxn(Ho6LOfa|8Owi0y1mSiXaJJh+AfE^ZV5@XUyB%d?J4vCL9 zW#bNrDlcM-m5uFl!_qsY3hk!efV%PG{?KeP%Ql}d>V`5)3_*_!N_?TOaYZgt*<=a+G=apcv}}oDZ&D=Qqt8Fu(cu{&|RWncT|uKa+7({BK2iWQ{fU8 zhZIj<-4wBbo+jKb+W)SjSy+J@CsPt%wIGA>;(thY!CzqFO+}$pHt-_wEr*l4$G(XB zig%It_`OueLqpYZe#lSP(eZ#f)=3gge4r_kSr0T7B901$jBya#Vp$M-tPB|pIUl2% zC5DbCt9kOyD6}<*U+>8m(kke#V2_hD1GIQoFPiufn-opJ7c~+F;$fhowGxq3t1Xgh zj@$Gl|CA#!RhOJ8ZvEIbFH;ud<^e`3b4-U;RNDjK({GrPZg%IiG*r9vsf4kAuBypj zh~;S48<@IBg`B&)Ua_F(90qPBPhh05!*J;S_@T2ZBKxl&*njO~8iE}**v<#KA!p^} z3(7Q8+f0#gp-i)Vlgr;sX=cw#-)he3(YEX@Kgr$l6D;qTdI^Y_N)idaKCS0^%mcHa zJm1zKi6hU5`dHmY1`gSCl|dlWgk6NQNBFzRQzUmtN79~i6izw3lwB=j9S-N~^oh$sTfD%whmU!UelT2& zdy4pxE0x6?Sh{hlV`L00-H7$^mhR0htSy!$r$c7Ty^0T$CuZq(uiW4_kQ2)Q#cm9= z&~EvTt=map!BGMMA~bThxZ}R!Pd#k}dz_Nl2kA(VrgW;{6U^DoJdVxTx=s~ywlwj` zoZXU1t_;oDR4{Wkc#h23&whm{vAj7u@^Ys0=4@qBsgXIGJUf`PlYq6@oUL&vOiK6e z$OSA9Y8dySEi^cvOeB8nlJ4e&V(cN7wXu$aWAR>HZ+j*qJ~aio$HR~hS;(W14*(h_ zkudw9EaWi%0z3JT!WL|$4z^-JwxExRcJg@KIUBe*tquK^pCbt4xDV(*^olc$bmSP; zssQ6fb9r6$VpAir$<$PxVkeN*iJkwS`vrB*jW^*mOY~%0;4H*mZ~OZ`(P74yp`Je( z!iti4BmDqVg$Qdh4cutFM5nG;<{ScU4MLObM_;RZvDa_+W(V2z=D7N=`O~DYHba|c zl~+2IjP~9E!!y;*I1=kcC)4E7mQY0+Dj9reg)ciIu+RH}C7x0~z90gm$m~?3*r|$I z&{z{ZtE#oq6`1VFr&OLEH zo#vXwCL3_cyz??hTNk;%0RU!RxxZb*HMPy|!owQtT+i42$|KQHjf<>pgH9sFKu5Ic zRl$2vvQM-&=3N?8?q^^3XLRQK4uuQswotgJ?@+kdq=JB->7DX~g!4(-{J@LWiN4Q~ zNSot+eDsv2-0W4Wq=k^X(GTNnGQ`yPB~6tGJ_-09+hmmsiP4C)rDIdhK5elSFJ9xgW>h=p{j+=HVS|r zw9xjHjhj3;6~(8h0{yRQxdFUj66R!%4^lydEb8hVpQEA-JHzl?lw5WahNaI@i35h` 
zzU&q`n?|sc=65c%%|_LA7EKHb$RT7nhGo@j(Kc-G0K!GGbg0-Y<^&W71fdgUG| zdUIYG%Z~wSLfN4kNCq6N+rS7hQe9|-kheEGYGbh)(!@au2mfpoI65GVV5q^R@YvL& zqh8W4U#<>=pjiniF^H|zrp<~dq&YL`F>+D~rIdcHx=>G~@v;Kb-&T?mlqpRotlV_2 zylo=e%>yXShLGJ#c68CtrQTEw76k(m17DoTT}f)(afi=>>iIEslUgqv(?*hiE7+N( zpum6K&lH5m+uVD?nwlE=HJcdQW#C1M4r=z%+cF}8!&;5Cx`8A-)5YuugbNj6lYs(fB`LqN2@gVLdG^AAPmr3Z8{9Q=8`J4Y8xsh!}kKdsOoYJ+tF#Q zYCAlpSW*SIWO5N8D$}3#CGwI+mWre_hxQzuLJSl`m1Nl=dT9oQtTMe;vDR}0Ig2M3 zxuN-muoXGArF*F@H9m@=x{UI>6~tU7GrPIm&BSBRUX__+lE9RRA_zbfMGzPi1O^Ks zP_aD-sQPvw5C9-2jD0#h=*jHOo?(vgiPH|ebWAxYw9P4-w+TPj?3v|U={&kNI)1eAcStv^6deEm*K9&FgOA0IYavs zVRU-P{A8TB65 z_~2#3)T4M{pZ-1SJ);Xl!E-u#lpkfZG2p3i6%5t}N({Mb#`^+F6j|2FrQYQ& zYoQ9QhYgl!UeyXy6JS4j6UhE#nxx^u z6UcDXXZuTh7KCJWcjGc+0S1g6U&UZJN=z}6ODdEL9F|=*v`I^p4z7|8RFm$S&ZG_m zWqHs3misOE-lMWXsZci66p9pE)|_-7M%-xJJ?!Z>>N~VLz^87N>(g(xc)c~}^I#`%(g-b$s)aEsNF0n(AySXVY9yva%@W2`2Fm*-Ukf|l>Q1=T zpB(wp7nC+{2T_;MBB4ND6bj@9k8N&t-@nzbev@84h;RZicGx1>%5=7|NZ{+S|4!^{ z7Z=ISSggfDw0ilB7s;AOA@z=EjcAE5#v{W)Zf}izUlk{;5h%!lA-RmjdAG6A1x?1P zMZB^V$m5~}AjF+?hy^Im%sG!AO_`X@yr40mpzsvYO$B#n3k0`MLjOY6m zbv0T{k-#vvzop&SzNr#9NsJ1G@E1s5(``bfgHy?%dSIIx1?mN7g|OyZKk-IKF-Zwq z8QV~lv}n3&SLDViw%Oj`gfVZor_c`8PTVpM)Y^V7I_RwquB;6dTD~)j2?`S-?5W*vD2=bci zXs!5$&Lf5CT-IS_jV#}RF}%0nNni_Koqr-k^^lo&HJix5(`YD;L7r_1B!|#QyM_ELLW8wq1+#e#L8}n77@U_$E)5t} z93bv)yVIX>tW*lh5#1P=Rke7s!C^iiWi%BY1}mW|C<=m zIzS_~)1gsdfge;4R4dujBR5`>Kgh?iOV%`!s|^Klr|n0eBa;k_;reiyjp+tJQZuGb-my5+x=cxftGEp zyZ1Z(Gx;mF!*tX z3(^a`zNV64>(|fc^$C+7mB(nSQD<;4e-y9Lje#|wsZfxst$E3{Sy20brJgBKWL-MX^nY6oPR!j4N9H#MvdLfd z5MRQwGB2r3i>SeVY-x8-o!r<|XC$oULVcI+_b2B@W4mQY7XS$9#=Z;5O84ZWD3jqs zh)<_~jw!&Luwp=kT^l-#Jw><;(4Gp=-dBWn%RDq-SU(ijv`w^c*)H0L|G{XV`+th| zF`<3#|4-0urI~ zWoYH(fo?v)M-ra(Js|(0@M+zFfl|tvkh5`;w8HS1UvfAQ$abJKunx&B6-r|I7;O(! 
zo&&J{qt!_O_1Z}`Wpd~w#JOv7_QubBDxKZPAJQCmB`@x*oQiAG%c4zAj44p5PeUs6Hi zakI)aq1+Pa=^w?XnQzciN^lf1TdQpe$FnErdFB4}S)MayY55oM1JVkl>rqD(k&pFa7$UY~fu*FmXT55;t+i4H?!;XPbp;nnW2OWB5+L)T1{Mua+| zECq;|+V*9sJtt6~dKCEij3H!aoGg2Y5m=`mn`ioP_Qv3t2OTsZnIhYbUJm8t3WmtD zqLk1TTu3PbQ6N>fM<|rqKA|W(R({;3fr!>0do{-nlk-ynasf(y8spu%MRR%fRqD;l z*ApXSZI!AMWA?hDON8l5n9X$nO+ziGicvvKs&lpucZ0C#6ZR3G*V@`{IU~z6Q!nDO;-blKE?$AiY|>}I})X+;$KfcO-TctD@<2-|ca)szoLhBH*%e+E)*!G+*5 z(ITWWN85X{S~|V>kVnztnq|dsqjN$nC}Ji{;}<(%7u!T~q=XkCSy+mYTqIYdP1xx9 z6lvyc_)q)~i$1_INZzAxD)B)No@yBu&F4kcn-w8?547cgSww-C1$AQL`21tkiAB{> z6fs(`%^Y%|lEhFUPQ~{@^+tUPmnHfCt}IYsIBtQ4P>>=NFVNsTw@$JLR2zlI4v4D* zGGpj9rZ>ah3&ekJ(y6I^80(I?I;D2N)k_WR+e4i1k53XV)bQd#3gS89L{mH#M2iNy zSruy{G*Mz4#4X&KRrI(Z4U-+#C%-rAlU2oW>JqDHg5-4vU+ULvU+jYIaTq8PUjf9! z^dkyr+pgF}pUX7vs@UC|DeV|Rl-dD;ipCv_-9K)T*p2gsh+)OkC?!H;qU%-!R;-Ja zF|Qqq-P=?rUP-ao0fa>TwNvCHASW^HJT#cr5PeMe%K)n0J5n9TO~moVzE1ag4mr;=ix>n=1KP#iQV}V z#4dHliCv43W}!bFF8q;j;Xe^Bd}1mS?c371gX5Ol)v{qV<4{wyY?y&`)j-Q8K}|sO z+tad}cXhd==kLUFr_>u-?sxorf|eamW4p_Jla_9QmM!zZjk|rGrB6f+VX+?8@X++ej0lcR z_tk|qiDjvUN;+x`BWdP``pEuZ2DQ@W2lCnlrtir7uw*=076|s>lDuNUj$D_To7~~H zo>I_JZoB*Xc)9-de_7*pS6;xcnSm`&DN))jC6)Rk+%0)%!{c{LG9p%jM*tGx5iN-d z=)SgHLWjUbK+Q0zpL^;rGn#UWoDEpD-pbkuyVSRFoH`8g%x{qBCndLIhmUQlepw1A z3WL`Pb3xjcp4M&i@@R+d4!G1$jaL%F)MmCfk318-+MpPVQ&LOYM)4U7%pg0T0_g)S z?GnJJ)N3(#r5!NOf-SeD)Tz-%k0k9o)&ZE`4wypGxJ(C-x#&jWwKxbn9%UGWovT=C zAuqxF3I_dc)h$p10*VOEYGNZPwZv<1r;R5KC_a;dG?;2c8koiN8|fS)*G`&1<{;nW z_?a@G0K`TMx1qprxGM{n^;IlS3P5*I z35}eEt1ZZm7cQlCe&Ig$1=Q(-LMt|$_l+)GHoXycc4Fb4@`e{K{X~F&gK(dqw(l73 zi*^d`l-hZ?uet6Y19w!;U4Z*L-Z0$hXB^%C6Tp8!X|n$(fd4n22mA-Td9vR%@V{}g zk9HP;qVPua1c10rcC!t*FWf1zr_|1q{U5IV$H0BjE|L8khdcee$sHZwDTcdDlM`%i z^%HhRh(m+LO8P`1K-n}kkk1$%jo?%Xj37?sZwk(r$`ZkDoM&$YDoov zEx8S!08J({Zb_9I{2PJh?ph){h9<>qjqDmUe^CU@88NCOXp%b1mdNpi(0r)~niLra zO?Jx4OIDg%cg6BF5vZjsPi+(Ul!$eLyJxo|VpZ@Nc}Y-U4Zfvzo^ztqit_G6;1xfJ z4F9Ce=*@4UN)7Fq??ZQO5wT7mXJtC+VkML|_!ffqvOy~CmakL{-lXOGo@g`L6}U2) 
zal~3`@JcNmv#Q9EHW98tU{j6AkG)_XuCkz&WbH+8rIeBFuEBMk;2I}%o;nV$xr>!3 zaROYwr%J>`1$@G+xJCYPO9jgwY`r$zT)Jf$Kl!~@zuD0Bh9(p{C(YiUV>u<{FifL- zTOQ4bYsf`jVjE7IU7{yGCCn`30zZf#L_RVtY$6VDeHm#(69pfh9~xl6-W3KjuM~sHD`<>J`k9E5gUC@#jI_%d zi#dWD#gr>EQ;Jbq;7s^h3FS)@Z>)pNg?dVIpCWfIrFI}R8OnrlznW7#Modvkk5M>@ zD6mIK|HrGBJ%6QUY=3Ic&n(a~!w95;nlVM*t(qZ4o^WxBB1KX+sT90s>D?$cCFMh( zu&+Bqw$)dXUHJ@g8}S##0X-}OW)sV{bmV#+)*VQoaB=zFtNM+0`z~wmh62u_zPT@+wP@VDY zCD#FrjlXo3;xDBGdO9;5uoo_sE6HA%s@Tgm1A00O*~?0^K4ORL*eU*^yKVw6c1UnT zA;CQm65QaMBRI75ogld2CJmk4F{p2g;KW5(r?GT2Z0sn444q^r2#!*3h~U<}B7%## z2%^i5AZ?R);>Gj5SEv{BkTT}A%{npn_`T{(B)C~2!3C%JL4A3FPoy-#HDzW&aJ#)h zf+OQo-tHaywr1@d!I{|2AhOa*IHBA+Ca*G*XtJN<-uPlm_->ly8wus#yetF1rx{eSUOML}G9o;cALN zM3HT?)xMw*ve?GbR5+D5T#ctfTT!^$&8iohS%Qq}Outl}m==bM z!_{ckXaQ~0L}6u}c$1L&gv1KCeq5g*lOvgLQcDGf<5qSEg`#jZyE;Ti#{q*33%eAq z27}#+rAw(DEZtHA`@Y#7K_^cnETG-j81r)}iqZGr^K6i(VRw0HZ87)AP{^b?fg5pg> z=T2dZQaiv_sd4`ycNgY|c<(xC+$17KJ4yH&$7o+s%{VS`QhsJdjJBX=ytuw_H#NP0 zxgU3NiX!6}EeD{@-Y}v|Yog2Nk)uy>wVp0FwiPzf!Lv0C1hHgo!4ZHU{(M{@2q;yi zHsVA6RHJjv7DEgC<|VHS-MXC7s0=2I?Qt9N80#>=VJ^{Ov2t{eS)uOHJ9&mOnT1~W zwUcFMAkKnmMDZ9MYy74QC%PBSRlnnS4Eg~Q|4Y6=M!cx(HwKc}xR4Kxk(c)52D@z;7<4vX(eB|0cOPfZZPZ2pko*Nwvcw;Z;oA*`)4c(s?Vh zt7PYpD>{54tJ^ltoye=SW6J}jcECi6wy9Ad`G%+Rn$yLp6r&i6wm$JDm8V)^MUuHs z_4bLhyi^=W9&J(!>cq_BcSU`jNRe^OgF~UgbvPkjL2GxYfpPtfp|z`o#%NxtEVbGH264oc;zFl-f0v*;sE? 
zO6!Y`ad3uIE)J-nrX`7q5jaMl8{u5^9zT zAx_ftHn1~cPXh$TVW*S=GtnKSg;pXQKzNFGZ{x!OLN;8?JL8j9ayORbokigQlroI% zVmQEeh6+$dHX+piBG-OZ35-3WIx)toEaaPM$4-iZIx)unq&hLisxrrqUtm>Q;A`Sd ztOV-nkZ)QhVgxJMz}}=d1V}r9R14w^lh|&DZ<4TrA(NQdM4=?%PV-H9I7_0m$PiDd zU0Z~~xvCA10P5HjR>2raGZLaT;(+XY+9tO zjjZ=^=tea*^E{uilQ7uJDs@F5CiCvXB5ff+_d3vxHeGBpjodD|;uRckp{DH=x+%2- zbh8F*^nAvn|D!MvZg?>pC747!l~_#jbOO4sm8&sEw_@Lm#biO97~ShtCq_3VOiJ2j z8NTc1g7{+T!aB<#U08lal6hMul2JZA^k7R$v6Yh-#Oz8zw%@PuJ5-sZ0doe#_BLtI z8b{)%riOQ_jx)+e8J^z){gFZ~I@hc7_)F^WU`K9%{KklLy@7U5ka6Md^JMQ(GT|H| zmMzkbqSJ#&c3os*840imh(D2Hm{yghAS7dsxbK*DA>Dvzr$Ds1q8tSUF*}@EDHU9V z=DidAgi_llu(%^~KyAA6Gx@79K`z`NQi&!U_=wJLh66jnvU?=zrdL?VK(6IN4E3JC~ILDqte5J4sSb#;rHbH|S=&D{{hu zY{`!+zaanFe1lH6JIaZC1JBX_J>Q_y?GAGSF!5>pLq?e^yfb;+`LYR2xTKS}MAVXm z04!pOGOm{#Wx(`hXU&xs6^N3&mse6n-dSUiI~Em|34DAqlWmPMr(xo**ugOW@uM^s z$yhrX?F&8T1rlun%G28z<(m?_^lwR@LY->@%IS312orxmNmy=Ul({%H4A`BB@}1NU zC3PbFDhHxX1-9){M*HK+HtcBQFwot?{~yZb9mI#E-6^##%CGU?RPXCB*FbE z7s8Qhn?}M*hwOp<9%&BK6vugq$C~4G*xKz83#APZ9Tubto@q9h^5jx=qQu0xeC9PtKq(+ToK$Hjp1_%%(YE;xHAVE4aM#cKzVP{^rmO0lI_Et1%X!Xot~JSk__u7Wai+hRD$sw+T3`1CD#Mbjhs#Yz ziHu+Cw-Q5!?*7)bPPpTXTI;dN*P4|Zzt#gi@mgDza-NZEP+JL(XXmFgOvBuea_A}z zg%ZZWt;KXr=)1&Iv0_U;(77r?7}@hRFpFZ)N>gYJZiCm@nU)Z%2s!?AohB)7$I*tV zaPhHL9XYK76#14qU(<8Aw^lxkw(w&H?d7S<`Dy111uj;N+j2|B=tmZ6`Wq9k8!n+@=#w}oZ?)~R#5^U<`_-e+Tke3 zpv+ix-ey6yzGFnBG{;1$CtE7P#J@ix)-Dq!lFgK0qUGr_W}k7zR9{ELalW?((R1AR zq(t9@hz-q5M;tiUW=5{feZoa&;i7L{p2FWQGFKRuvka-_EwHZbIXu-FopUjcaoIDm zLcuAzc=~0?Qm>p>Wk{qptqvB)tGR?>6#78cf$J|VH<$b!k zF6zqE_@=tqu^Nn>$SaNSv znc3AF{qfHHlxm~6c*39S<;Niaan|~=iPNLgKib`z-aail{hRrP-j}0`znMR0%9K<& zr65drNcg+!&I^K7J-46wt$g66FI^qI^z zcjfn{@!@yn-|Srwt-mY(ZtvWv`)*RGp2^<6>~6l_x$`Lw8i2!roDwL(dC@8NY-F@8$R6#gXsjXUyb9urE{Q zl!)yb!w%lcgxmFQaDG&|m*K36R@|H4j|T6&H$Q{RrT6Av>3O$5cyE4}=Y1l2;6A?Y z-Tu1o=dbp%58rS>P-%%eDPk7Qx}T$UH>^638`O#qN%0A9bia!3*Tt8ft7nBmnC031 z-#cHw{6d)H*Lgn~(64+UY~|NuU$JL7_vXhg(MLEJ)Smt79($H`zwY|6{c4ThT>kA3 z>D7$;`N!+*SBv|#EwW!}_v?=H>=%b*YA~Pq=7rJEDji)>?uYrR_pNB(ALg(0Zo2)R 
zALeu3l-ZRE$Qno}QF9ytF+cj{kMbM6hi+f{KtAnx_eRl<{L|hSqmTVKf4A`Dj0f{) zalQSX2lGE`(T(AU?M5{7k^E-5{m~=&S7*H$(SpbFbG+Bze&S>KS9sq1Xx%UK|Godo zDmAn`^-mm(%ErG%T;_+VW0$A?k-sX-Q?KJPJ)H6GaK?MW)XSHrPKjk>Owb>$;kN`@RN3TrMN58_zc2mytE2R<^LHQddbX_1Se`nW%eED^^I!f2i<+_V@`*K}F+&Dw1xUz^JALZuzxej;shc)iKoxcY=R7Jk>0sb85 z=&z{i*D9`G;3VvDU`l`GKe!UOubjp86Vv)DALM!~ANfxxz4)%m+58-(JG$WT8Erk# zpVzAOR^Hm5>jtjN!{HBK9xU-vmFukIY5ldz0X!SxQvd5$*HX0AdNlGx{?)a6yt}<>wV9=&)e?Z?_KSE)_cdl&YL@D z_CBBXKI*OUK6R}3NpH}Lyvx0{-Y2||dmr!@-J_G(;Lq?{j@i{{?v=Ti@ep|hrA2D3%pg{_r3F@XMUeQ&TEg}_($yDu|5b< z=hkr*B9@^R{*v<~7d*vrCgs*8-lk4Fo)NpAv{U{jy|0CJ@QeK2s3hggK{Yn0Ub5USH{D6LHXuYjrYI*7gT?Ym-OTBCL7pD^*?oqJbkjUts zKOvsrFPz^RpPvQnH6WdzHQ2$|L5cSMV3*E-rMdD)3|$#nI-JelkXCRN61@Nut*%e& zWUFc|em7NKpT4G&DgKo)gNPg={76eV_Hmqxjt@?z-P%xVq;?Z5&nHK0#%47l1pZEZ)btv>pUee}ts<@kor>SeZ{tKn1Da4c?^ ziW}~lv|)Sk8P{@5EytpB))jmI2dMGF5u<@-1~yRU*(SgVZM1uEa=c-pW2ozYXVh5= ztJ@>)OhBD6TlVeI#eYKZ$JE8SBZlTSRQii)1VnD8E}iZsdSG6|edeuk1nAaCT+|4v zbwLXS{bR(P@bN#pkB`IKzj%a)%zP#RPP9?>q9F zW?ctUzzTs~IA&)|fxlN<7Xr|FRAuWQo)`CzJT>kn@N@>NqS;SjDYQrDK9%q5-W;>f zVO>q1X&H^Ke~RPGB!ve#M=ZD?pD?6U`C>1;ntQUsz;1r!`ooRfVB}(n7kS}W<1xQ@ zJa9$*@oFy|8a5peEKmpB;EfI3)6~rhZj~VmGq16yf!s427BxnNhfG{}D9CY&A*G7v zGFx>smtMq-g@Zg~2@P+h!r5sec6Skn0>rIPwWK)iF7f&g_nx-9i&Jk${aK!RD*lB& zl?xZFe3#uE(@ldR*?v9c{q?=W_$OY6{mX~Nf34J$gBnj(mJf=bEazsp+09fVMT2om zp$XUkUu$KU`Ex<~jAbpg(Rd(d+X!~?OJav4zF4G~m=nYuhG)@W!Nm6RX~BiSpISG) z!fI(82LE6z$9iRgb!isOM8^aQ{L^>}oM!HE@$f^*=W+BVatVi`U;HIM_pMD+S;!Ri zt%c*K1Z-Lcl`y(Nl*@2c7PQ& zSAJ@3Q#)_c6PYUJ}0y9zFKg{G3;=Hb)Rh(e8Mm+<_VCj@7nuHuwm<2q%(TtW5Yd z@7PH+073oC3I^_XqEnv1+#hIKm}ZE{I28XL?1)Z!7IkhXo?denYI;t2dvq#{3>vYWM0FiaAlu2YbKI9+ z^daMUPbRMpnj*+6quenup!WwmRY`cq6!jbKn8KHg?F7kLeVG-qIWQgg7fT{W9b?9W z9WZ0;YmNO$oUW?3#BOdk9I2fQueU!~ov&o2Hqsu-bWa%`3xAOf@MFJ2Zo^eDKCYCb_$*L2IRpAx6JFh`AALcJTj%9S3M-W$xrbZv`&+s(yMvqRxcB9lHqAi zEk2DIxxY+MT{ew(Ob1@AzaYU?T;;3&ZeKVHHhXdW^PWCqZy%0-o_V+XeQEsrF8A{i zeukW;Cq5Bt(3><|MypjB?xuhfySXL*88>=3CpCVGVJ`75)zb0We0F{zhv!j@Ld)0_ 
zecJP@ok;#{c^+_f6jIJbS6*M}s@a{KXhx1#Ey<$@-!Xx|4X`I@z|{n=*&Chx!G{f4 zmkPwGa;01`@=9~+GVf0l_Z~i2sSx4b3KDNtGA2g1?# zEJ~nOZVxUqXpeSHfObJm%UQD^jw$`YaB+KdQ`(nKXC$~ryX!#X)m)7PIZZK_*;q1mEiQ9&i$AvpGXafC3E^$3KTcSyoyzF6y3N*^R*41%uTo47 zTlUm!>%?Z8OW`jbBO-TbFxFwf-Db@}-&}M~-k)Ck2=1|X;sRRT!tmtvj@8@U#|P^# ziK>hd?ub8{kpRiYvTCF>si9;&E>1mK(mFG*dFdUca=Xr*hZ$Hu0QaF%yn;{GJBe<} z_}wRrmH@GX51fCOfWYfJ$*~<1V8%-QPK1Qp>+7(Zzo_nKgucYPztLgP>UU?itHURv zXP?Q>ia|YXbBwCuTmrSBFrXief^?}TTGQ%hY7VPnf#O)flUG$GK>7B)dJ&oD0xb32&@56W;)^rhk zalnRIt%{k|ND;YPDxbo7z^#N%C#mfNHE75se{Z7Hz+E3295C_*_zF@NL;d@$8QfAF!y zSaui@-COYYs_BGw5f<;q5`|;jW&IEpU7CI^P3xk{#3m~#-nuw-e}R|G@Yz;yTAw@5 zor&CChylk*?tAD;IBOR3ecdO}71_5Lr*Fw|#cFw^b(PDEbya_3}N=a3S zI;XeKK)S5iR_Lh#7!xprvQ5B9QQQX}Jq|FN`%t?CkiLrX-YtF3iZous{b5dZWF}7V zNIllVm7$<5zEh-QJvY328=*!T42dnmtApB~jehQjA5Lu35CF#1nNLTxy-5s=f1iM( zbUhG8F?P&!{Q#hgQ)8-v>LAB&#GnL+fu5i|Nil*w)$8H`jf1_?%a}r)LK`ZhdqX0jlH zjm_z$?dGmnfWfk?Wvl%Sq4!uAi-VqAP)F|W92nUWd^|bbnz@?fLwQC6B|oGA1f*g8 z*^|6P_IPK_PA8wVMYwarU?2kI7@AH8_t8$1)OjLK$C)+fB)fi}Rn6mIz#l)INq}T7 zkVYA@A6@&yYg^9(^9Wb1EO~orHTU72a$fE@f>_ZTJN%xSv;jfX%o?rn%=%bu9uAod z5}21}+6eKwor5r2E+9GfqB)f+s@*I}A$VFcO2&i%AtYR*bEnnBn%wq68yEx_C?--5%!#)Kc!AfU}!6)ovWY0YB)M~z>=3wjZdrMGu zQ%?eIVhIRwWcEGh`p|S9??@elP!$tn7|=q0@CPsMW!$vuv#vR8X^^Z@DT>W5c{5-R zvh{6P3U6GF>c?(lbMr|R41b2|!!~3_7ppp`fels1oBW&Zlj!x3SoE$=e~LFRx}ejq zylSYYnl)3)Y&g$sI94AME1Q^D&$SMsoXw;$Lr%V9dWi^nBb;s<3_#p^E7x$lKZyw1 z;X4s@zwboQL%xZioxCd+g42q>7|V8lq6m82j}9pK`|Mc1)bjuWf-`79tCyb~r zgEA(A_=aSVD~w@lH(s(IS;1_AgdtZE}n{mrSJ{ zwo>ks>$GKSt~@__uuEbTlgzkbI}5uEONf|#?Ar=6X9-)yDt?n%$-K$YNkL}iHt>me|^|;G3B&+ z1xf*z*AjuSn6NR?w(y4939!~{@Gk`sgCy}*Y)1LFW^t_+3bXpFbVa}q_%UJ*fHZ^S zS;h9~^d7&DM4tEc__Mw3(Pw)6nQs|XLv+41T*IL{Ua#S=3?3Ac8G=t5+q&AHRGW|S zV#u#_8;90PUDl?VZ1E~NCp3a+)>MD0w?2C6=~88O;T+UE{MCo8zNS*-)Gd&8I5d!GbB2`GvN)X*T)vulr+v6K-v5K3~x913Es+1 z9oA&Q-%Yv>!x@CLWJ?GKa^)ae*6Z(AY63^OJ9<~Izce<%d&JtrH_YsH0hZy;=tsT& z-ZgX+>aBeun}k=0LAi*_r?dR^&}GD4`0K)PAs2q3pfrVCE*KYb;TMmaLN4qK9UpSx 
z25%_jQUht5VlEZ|f(*>;l$+0Y#P$vmjOZ1MK1TENJ$NNz_swK zyg0RE8VJPo$J3PZw16m4<;rx~VlP7$5L#J&M@*Y&THB}DCzf~*POG3_Elxc&ts;SsOF5$O^O=dj=hTH zorpCm(MOgu8Iz>b(PFUU#Nancdj7sze>t&DQd)n(#?}}w$=;QMk-S42bg1CUwg%ct zZvd&kw3q*;j7Acj^!cu?T9aE-=pz-<$wBAmk&b@2@e4woor)jLb)&%3gxYpEqKS~z zzFhbSYFm&rc zbI-+cZBxC~Wc*<`SN}GAVGE+dK!D3;v&6>coZ}J6bUZg5Pt&RGTh>plM_n$PbdXI} z544f~!wYT8Q)|>WD7jieZK)8e;h3L-wVRjI&1>8U=|)zQS!<7k|SGXSy_Iedn(;{aKxe=biC;kLtQJ zTD&(>t3CRMz5U88p={lYBto!EpPb|!`h90~884kAyh}?$qaX#JHu3aiXEj$!hug?{ zBTcV`#eDYJHs}9^Q-Qa(_=6%q)YrO1Sq9h;(wEQjFL|xgO~f-T)yUWv{^E7kKUGh1 zB|xY{IAh7)SM@Z8vi_MJW}LG>QE^NU9_o!AndL8g&9aztiMN)Q@lX6auit&l;WLSP zM@^li2Lk5#5bfTLrXYQQJp_gD-qZ=xgDa!Am;K{vicyQqtYby5V{Y!n+%6-d@+Cqa zuO79wcN_6+imB8eSw?vCBZKM){%MMSf#IgkW!&kHY=Sww;5{w~jsWRjH>K(H5UY>w z{gB_oI!EVK{G&=3h$_j*_{sf^?yL9*d+&>~h$AlZxYX9$&LcNf50{FvK$=ra@5Be8YBmN4q>P-kRLD<5gFwMHf+)~5n^w^K0)$}TP^QSl1e}u?qSCzTW@5s zE@%-3*=~g3tA+MCTB+?U@j>P6be3r-Uri@mvxT7Ax9s`lb6_%V*nl4`S&+*|`_J*G zr$^eOkK9&N)cc)t{C#U<^3A}D<)aBZ@JT9u!EeGBtzm#GEdFh|I-A)*;lU5&*m8_% zOW=~ED0OU}k#L*6(iTgcyX%j)>EsjWgj6Q^Xp-plRLlWf2m^iG)6iwhXCUn*bN!yT z4*->1a8(|a18^Y-l^?3AN6IBt{VvRlaE*a(sJ>SW6d=>0dsz!^i3V~>ESr-U|Y^dk+S1C4D5#(lVhoJp1LQimPYA;XI`&5 z!8|nKLHnqi#a4~garR`MNd#w$(m3Or3ZH9b7vqC;7LITYhu02N#ZdA8oKtne5eE0z z?ci2RFp6fh#9OCsBw;NDfYq#l(YQVMuyO>{yt_a6tg-Tkro`_G!J3}nYWwgiB5qEG40Kg7 z#65)A zkt(~u^3ar-0M3|kEP1d+l-ec{1RS6i1Zm7a}F zv0DC}Yir>bYVdULoatybx}#W|235n=7Q1||?vw2BuSaD+O416O51u!aKFI)P*&*R- znCTbnWfDa6G7QrD50i!N`!*9q`&0%aK*TFV zj$iQkMA>G=3+B#U&@$FuXK9wdVjkPcXyOW$x27PRu{0ucrV|WxNJOY46>*bkTJ7mt zZUr;BLmHs%k>G~#VKh46vC_P#f1%%jO7;GQel@!u7GEhvS1t4x!<)7*#9Gcrzg+0Q z?jY4gU2&FS1TrzV#5>wTg~)*wIN@{QHtlr4y@?&b7L57nmskDho%{NI|AhH*2G7|P zz;n670ZbwIIc3b3cxNYloXPIJoOUS0@61;+mwV^k$vgjs11D#je~k&{qn-QuGg|V& zRnfEi`Y&AwD09+}`FA`2Qrb(K!o*?P5)PBL+_Zf3)|>tO@` z4n<#D@v?x*GH!x=J%z){H3UUvHzYHf?&~wi~W}# zGGR1IkD7cm1z;pWnm?wLiO%7JwW+pEEeiwUdtN8Eazyhibn>~NESytXS%o1 zcg37^F4X(4!ky_|v2PR`_k{dme}?`}<4(`DYKnkn3m5pvBDx 
zR#^L7$8r^mi{!$=bew9Rlh(2ZZx0LJ&hFZBk$!>4XL?sgdmrFWqXaWNCb&B~>;V7xg+unX z;heBG#uJ1X%A`}cU{*@T_oM3%@Rz>%vrY0Z2k+VqYd_n(W`+zvA@0hLkpV}vA%g)9 zjJ81}7(L`L`i6n%i~}KmKl;B1`bTiN?Lhxv1(MQ_YsfegMo{;Spm#7?u>ex}gL|Ul zLH+_??vG0WSXhdVJjkzl_e5_x$e(*ME5SF}qR;|SgKmpcL#9Bc3@Q^=6=)B(Sm6|yo6aDD1lblf5S%Xa6H&&8GU(QSwLhr9sR6P1>V{Mw7TfEq#Y4@Qe}1V-ee z6-)g&%g{Jj+O=3RrIJbAa_*4ynhZGC5Whl{K{QIQA%3HEOZ}N4J)$*baINDRa)MUa zqt0MtxTZ3|6?9NrLq!;jWghZ=O7BNcFZJh^N#LZ}CfBSll&^T9LO>QLk@_xp}3z=ncHO`h-460_Ja+Dt|0Fwp8C)7E%rRcrFj zKr5O9EH|dg_UKK^{kaJAJEBXL`?JzSt~M_B=d=`pq3HhQ{yy`L6bXl0q@iW?KLEah zk1X+UCuXT>&1W~XRtnK9$goB?i3bue#KX|i8mk&^?GGLYb(VN{*4u7nI2!YIUM@9M zRb%t_L!#x9sjAx)Z;UYqDAExcszV$IIc8It!U{BR9o!)m;X8->3u~|{o9y(0;p~VF zsi7~8Fb)jBR+_QIJd7t`Bs^9hzgPz^&^jLfE{z{n%96Lr2-j2B{AMEG6n{j3hHJ9@ z$D$I*J(~>%#J^W(4QFAoS!1!3l#s3zdharQQMkV*yR2n^Rh0u-*KkKxRQ<)5__Jr| zE7WOOheo!E(uO_DeCaKL36$6# zH({-^ag!(OO-9>n`NwFI34bgJk`L}O{;;e5&Y0_VAV+?Yy~Gi?h?&cd@TcYty9aXo zy#5Hkudgn3N~#4UeP^u{sY3MABm5b2$=YRouFjFT-D+AHt^xLFGh`?1(Jj%EqFG1! zd-eKOR24~QU$`GtP89+2(W{R14@CT)et0HM}Z_`?NRwCgm|(Wq{+EKs$<5I zI$R1N)-l_!T*9bjJj*RELl^`Io;DMBolIaky)fVhiP>iq4Ke%I<=LBy$@u}CH$wJA z@Dt3`#Y;T=w;36@SZtjTJ)unVNMw((9a7r%LxdeMLz;B-=u!Uc3@mzm6kb*6j><3d zGjrK+CF6k4ORtn&8^@Z>5$il4xMPohnSXE*Uv38fnSCs}_+`L(IQsI-{2AWn=)sry zCr*(=tc|f@m|u(8NTO48_|g8MoqUp?a(}rH*yd=}(S##KNyB~UE5ONYXIxPau`ClE zk`@&BCA@+;@FK1jbsu{tf6jfcjCYb$;;?0LjHD)w` z7GT6-O-n+iXTvjr!X;VZ;W2Zd4sAZ*L{=`iA+Opz^h>s|=8y@$;;^ZY1yIf483Xu-?(6UguqiX3}8ue|Jca4rIq-S4>rghC1OvUY&M1Y>20hJxru2! 
zZ2}NPO~O#y?%&Zkvj$F9Krd=zDXR z&fx4KG6~|jhuO(7;H2aIWg;_ncD`zj*cwY_NoURh=={-IVVr(>W7y6M%{r55)>cog z-WEu*?FiBUOaC*Z9nllVlWTXidgU2en9t#6278(|c};XI3*hJ}iH4kut=>3dArzIP z1$HR^EIDZW>wIKJ#T3V~Td|W2oy3HOcFKw?88;Vo8;w>G^x739fsY%I;30*zbfTqm zJkl|VjvN(YXg0vXucI8GX1;ekiE!5UYu@rH*ERN&ZGV*U4xZ9TUa^5 z>rO(WCgc2#!QmaX!Ev>-?cDIb{}YOJ*AJ)^YRgCJhN|;I{zY{GY}N>U8!`_y+^5vA zsmeDH-P~EZ-h||EXXOSYM#i}uU1Fb>)AqQ4?e!=MYc3CT)cH{)!BQ}nC2#2!$h_S`rj za0s}6CzQDQAt1Y~mYfA6oM?T$!_@pWIyEh)sEj2`Q0IcR?gY?U%O3Epp-whMHC+_WRLB=1 zd>E=^Npqass2eROq%7tk8XijSP?MwA@c`S_>}0qkU0p0}W@VzUzS^HrQzS568kF=~ zOs^Q3!n!f%#i?ISv7`>T>L!Jgp)E8Xqpj??c&;)jo?p;5MKqEKE@tQGV6mt6KVze$ zMVj#9T9>SW5tB5r$=E12#X(=9myoHoCZQT9C&(arem;u0eqPbas@dqcHi_2XKZWHG z#B8)?%1>jkd*Y+3o~GeTn)t7Y)>{&4+VomN%Jl0hv5cfjvePUhi_tfQ7Y5I-A%}XY`qq{k;xb zm1w!!EzZs?2YT5Dq+RW}De<&bExmMmS8#JiR>nY_!Z8w!o=gt>0JvNV2HK$Wa@+OT zjwdOhI@X8vuoye>+t7!+RJw3f3W^7ZKghdW_;K=dlt8R~9Nv|TIn!Td&IBD*rwPzW zhHf_H#r$xi_lDnPdq1u=m>?BUq`oJ+W2M3yauB({wJDE@n+APc`_^EqFloeA#_THl z&{i7VX1wNct%*&WlBO!lsDvlWvGzX5STQyVN*O0^6s1q`yGuz1iCj2i(e+=)0VHu> zPezgyaDquSb|&VFq+MLBq1LOLVK?_FvP+#>zrpTc!7Ex>*XrZV!lu;=GG^yhb2U^= z{RBaFXs$b`Zmi=~S$Ytz8Xk9;J~`$-lw)?> zr=9~~46A%)I?i$$V!IFyR9e>tyBnIl24b4M{)h)S$@o4N4K$sfnyHVqeoiA&(=~4z z^c8B`|qHal4?PtQ>024K7LH$l50RPLd)A#>hfSn!ow}qw`nfMq7JKOZ>GIyox$)nghaP z107_VC^(LnjBmc894**yN>8ntonD#AE%IwEPdH;mrnxncs3J^=p)czzUA9_i zd-J{<70$6~E4iI72e#$v8Sfm{RdTDB2v!S{x-7As?DY(|sa>8{-#w8Qb}ldty2@1c)-t$3;qIh?M+29wIg{S)#h^UXM)O#TH7t~; zm1nYN6k+CE?b7`{dIo1|;MSLI#je(hDIO=3FPt#l0iUv zk)BdeJh;EoVbapsBw#>O)}n-ULb7Bnf);{eoU5pGKlw0?fT+ke0Ff_F?d%IRd;Ph! 
zFKjy}Os_1V(Q!P&(zH-r+u&6OhlG}0a>C7s^J(z{S(P-t3)-b{tuD-ObOM#cLb9u`438Qb=h8TJ=FTacx^@8~n_wu-UF&;271$9Ha z$RHuP)>vK8l%;jlPgT-E0mMxQtJ}x&yWqmP0NWx#Kek0?4UwBh_jd& zwd_LYphD|xBNX}Y8Ghfvew;PKg8dD>yc|15YL?X3LabMpkScMw<)^|T`gOTJ$)_MJ zOjB`-hLRh7_>KOY*Spkke3=z8hEQ>{MIhp+hKoapTSH3{0{YNq z$cY_=ia`kPE!!8o*9u5Co=?S@PVrS*Y-JwpOEOP^e~ncUr)yQER^-EG&3i(f1l*bmJqOrG&VtV%}d0wlFGY$DsJWkJG3nQ4G6XIcRyOe(r(3X=t<=nl0;rcx3n zYo)q7pERe1<}Ytrqk%E8*hr!PQU5P4Q)`Wxno`J?=lU05 zs%)4BzFxp71VH6dQox0xQi&ywc6*s}B|HJQY4T0?K>^ajUJ5n1+Iub(Wb$dK))#Ad z&Sz&pih2CiN;pqKX$oe_MxlR_zIy8{K3RR+w8QQ@<$gF+iAd%U>`WMPJNT9xkVAB> zfHr|FGV#Mi^u|4~M=iNYJVB(7-bibq6G^&nNBwW}=l%oh0lp_&k7TRj%j*^X3;B$s z8*3PBv#(NCN$ktfh@%)2b3~;dew)94EkDkp0zl$bM!pX9=z|k| ziRLAQpN7*FWD+Mzwl8TXT8Fbqt^73DYQfjxv*dA=(qLePJvTPrWTZ4;)IQHXoBBuZ z_#AGZ>5Y9hhHSWfgffD0K3Y7rtM8x<4wb-ln3zs8frna+Ls$z6kZBS^0A;|~59IO_ z$8~oV84JZtU_0cTL{Gf4IIYqYVcIF>3m+EKxygua|d z&Z5MVxb8V6zROLM`9iEE6lA(Aj922pN4Dpic<#0L_{wqMm{cpN@zd<}1Es4{H%BUZ zcX^tVW*N&$sFgcSjgP-2?L1Sat4yU^*zSg<5ov5Rk7YYDT`zLZQ?nXhv{90d<#a8P zfU2>W0FJeIO93H86I9KYJ5HEhbGyW;JcPnvgz-6xRZ11Ih!w4+sQv{oS*nP9*b**| zzateubhtbzjKOz-oy!m4DlL~bqg}!0;0>yQl>bOZ(SuYq2!MubpZe%K-MT1@74mNht;@CyG!7g&;X;DP`H_s8(D3e-D zK?&4i3RTAxR)U~-2}ISMzg+v#Iz++&a%3KTFO}jM?t?q zX}JhOwUiw&>O^vOG?qr68UjMDNk46K z`lGbGisEQ8&S&$jQCeh4?xIl)eN9r6ZL)3|1);P$Tz4}bBywat z$Vw6zcYAqv^@Vps$`9G&(afA8sPWL!JA zJ3(ny*<@4{y~nC8Od3W(mFuP;A48!>4Wp4h zk)#yFxqJAXK8&8c=vczD5#-abT;lc~$2UI6 zm$g7q%^U!mzxTQ+Fh=8u} z?%--(#zjK_re?qpCosQY9SQzfzS+Mk60nLOK}}2CoWc1&MS{*G()%1p&@&zhYK8^l zkif*Hz_~{xFdX_Dkl<$u{B1CR_0(JKh6HAIjza=0 znznH2O1Y^UOu|9`9*YCJ9p?Lp5P`EOFy3L_u*MO4mhV~KghLXV?}Ja`9g{0us!mG6(+) zM4H!5f;1rJ?nr~V_TqZ(*}Z2XGtucE^!HZvM%ynxSp-IPd>-_oV0!z^c)v+>!uJZL zm-|SzW-A}d%COuOXwf<^@kkeHlP?VqIBK;Sh{XAvxzu4H(rSCO^G~IoStyEj3ST%^ zj_J(eR4eu(mkf4Kp?p+y+JE}J@yX~JxA#jyLS9vI+3&Sf+f4ItUROwSt&9fJT3opd ztRlx5&}@z*4ES!2mHQ92{8`BiSszjzP|t$LEv~mtqlWP-dk7(+DAv7MzJTN-S6mW- zfST=Cq-qF5un6K-C8A4D(G{bavxhdBhq1F?>XOCA2Pj`+UJ*S&_EYN<;VQF5r%{(P_Qp^z2#tp23J|nx(3>)g)HiZBi6+Kl4}XFAZdqSULuGF8)-G_0 
zI`3YQ6(?eOJ^Wi-bkHfR=I(mUNZd-gvE4hyd{Viyv}g7B;6vJ(evPKiywL>nVa;?w ztWPm2^v1KO2L2osCbP$s^Hi8*8w>mPUNS(D8jcV zP)Ez^)%?#i?*=d20EmP+kDePwQNt*AYb<<2IAt71thU@5!Pp&a`9K{3w7-GXy{m1)eeq;3BomlH+KP{sp8hU>Gr=!SFs z=^1h$x}tl|^$)5A2rfX>Ed?G(;dX#HJ4>*(h+xJ-E_ zf8%@NOvF*2FcoW>sNPl+$w^+Kwn2$#+*(l$Uv-i)@(?f ztQbe$cyj14*?*~svuyKnT=X0~y;|b{6_wU4e6=I+z*m#B8mDq16qVLo6fJ$+YRT6~ zc+hlW+}{yYv5V#Av)ms0NH|+9rD>bCOq^J+7ulGsIDkvk@uxezdte%LDHhvV=?0B3 zlPUYoqJx9ZJ4a56VS`|f^_#A((Cawy3PQ2imq{h}wDz3K^$1mi-ekk*v8wV8*hJ#& zaAz~h>DU31>m#g6aIeuci5zW0B1@)sF}fZx6hWolE?sSlhA;GcNwH{gfDzP9i{ z6*P*0(h`1$LPF|jsE1?*HxhoWQ{G@zSq$Q;2Uim=!;hC#+ooGV!WOg6*e@TQaiQO{ zY^WQ6qE4Ek3%@oQqU;ejB$CDA(u)Mf+i;ndam|-F8f@c*{;Z(rDv!%Dx3NS&;jTL< z#quNEU1vic6c<{faH{M=nY?7myKkc?37IP8ih;s4tG1zMa|(-A!5!&jGh`{;iAy?S z$-DwqmQ(IeST6zdlU>A|z`15K{;hqRT0jX&j&qxokDoC0=I3z^$&YMBno1phEzNE^ zmnlO0BO^2uDFQ`>Lz?zhZ&Hj2G`4W4mRR)RYRieko3}hAK)nQH6uc^6iTn}dI3%bR zD=|5z#AFTgP+e9g3|nY}z!AjrG3Ep3V7u*K6;GEt=2ToC_vM4{=cNdY^PFs5?Jt|^ z=3@M}DJ~V2F7o%9>4t2))E(K$sPj%4iI-#_MKEmCxwLlRstf$fkjn~3F5HOz<08MW zO=Be?+!V26G(;3m4Naj-Z0IgTFF3fs}D*yaq zsOhRE-{vL{)t34NIx&hrrJVXfJo<@?*^Q5vsPhv1uP?(?d#r^s)k4!xgc$cH+2Jw{ zTf-yZxACMb)^ud^R_<_~qt!jX^w$)k7p83GeWn-0L*lgzVoz2=1NrEVOZ<6nZ$j(_ z%1%J+7;_!em4BudGh!b(XY<~Qyyz=PYw4X6%eC58ax*g3DU<6O4H zE3rP_)*f5YrFn!?X4Tn!f!%`0t`4Fc#oR$)p(Fj!__{^>9HCoEw{87&_+p;7@b=#j zm^di-k-bfPnURFUp2%FoOa7Mw`zq3Q&uVXNNk`j0%%KR)dEgt&=|!qN=v-G_RIQMd zjfTTdRO2vsvaJ5-u*>{euMsk7QgUQifk(6h;wJI7*{1DLJ%D9V+x#iSz%1mi3TgLkYliC87snXDi_d^7zBj^c9H5Tso)oM6X9N=YcxDogwT z7AVOS?Ofwmk8iMUV_Yse9CK@-mEm)|v3Y`z*b*`*Q}TM9jAzu*L$r#^c@ zz~}3QYys@wysxVL2xA;Z0Y6UoIx1tl<%{vj;McCJaUwYZc0QWXodMJ(kd#nGaD>dOd!vUw?#~~G4&bnS0rD0+Kf{@T z05aKJb$~q}fH86)sd}^ycbAXbtLd1LE6iaCV4U6|fY|If1c)ksDE3DC-4bM<7y9+@ ze)A%rpVa(B3rnD{lS(G|*7m#s|K7v=wi>iSoIR2^6dlZc}qMwCj!Q5ykT0To|nA(B7ezpv44ty$nElBm{gq>jcNx8_@XzC^;K78$n}NAKdMYBW*&hCS_a}=v zxTUi__{oF@rw#gh?;RgHfYZ+Mmqy&7!K#;e)c&iliMIcyeMU62v^NtCof{ORH8=FN zMgtf0-q)!YsCj8zN6N;*(PuvC&*mR3o4;V;zKa&`wP_6{Vxs)fO+)0%foSl+w;c8ZhgOZDPestySgj0;mp-95z467?0;F3%T 
z325AT=IbRp$}v&siV5OvUW4X9Pi^lF&mSQqBsGY^1rON)*felZXEm}HILscW8O7de~cbb3%XvSIeU zEYI@Pk99YFcXica~@U>L|gEP5-XDzWcz99X!v2pZ}Y*WEpzR)o!Tk@YgKNb?SzdRS|Zz8D7~6VSQMQ1EE}E=IlU{rU5YLa{bO zF}VBts(~=9iw*|xnYSsK3;@gsE{Wc@-k*JVk<8R;RXl>hj`nh$BJOa8i){Q0 zYvLJI{M)kh8G3LgCl+xzyD}|Wc%{D=Yjk&uhBE-^v!tFp_-As#$9guGu4u(){qFgs zpPquG^yQbb{Z;EIsH~%(KnmG@nQ#L)HV;QbS5i4_eRSuQ%3NpBWJauyesiVY7sRJW zaZ(aD!{Onm=PLg_FQYP4xgZIrDXvcUm)}WXi*t$mSPNBR;7px+lL>dSc8|z5nQVxs z?>0n$^tg_i5onAbjnTUnFcX&h%X7s1nfER??-%wFuOoWhXZ;V)yRgVYyug8VX<*vv z1D@XL!3F(rPPG5k{-RfxY*d?Dgd`q_cdPfdu=aoCfC18ACEoeppzB$wV#>05FYtrM zHLrxeYXy|;6LEhVTMX7Oc+Ta|`G?}0e<5Hv&^``R1TmyRZIcguvv&;aV4s!mlAJ#7 zT_$$6Y1+Z>$nJ{1`Z@ndaOT<1`Mo8yK@1R=C$K)+=kxy2<7%Tgau`&IYLqkeGT|Wq z4n-Gz-e1%~nS_kuQNv=a>(kMS&-=aUp;ggOKkr|LzHs(6{>%eP!UWKN5mGkCN{fqh zl#Mfpy#@RUd#g$byp2pjxXqVn)@K7{d;u*{_I(w)3bWzCzAOQ=Q>)DD@P1+4EMGE zwD}^2Vpl=#;??!*t6tT^n+G~lFgGu_qhoe(B(&pUgKR5b^co#*RHc~-#ee}pi z|AbcAAooX0t_Lz5(J|Ni$1|Wc*Bc=3yWY+a9J!vd=od%xZopxBNiq6|P1M1D(+&Qy zAl9`v_|tj+TQ~R%=x_7}e?Km>ZuI9)0Q{?O^xx;*65Vy9f9x?si4)<4K+^QINgx^h z%?|Lx$z_7p9-kTi9B!d&!l6XKMXpPVOIa4wG=ue_shW!K6U-(J%%VGc6sbBP9 z+Qm-gS8(23rWn{?(a|^g|IRXgKNcnP)3wNWP=BT7U-;HNU@=m{l=s=`Vn# zK>EFvAUfe@9N$kxm)&fOcJIx$Xpi0OA5pm&3BJU;Ka0u)A{hG}1|$q|JNJTQG0(Wg zpMPe6zGD(?CwmMz>4~9{?}0eq{XV%@litTsJ?VXvAc;!rxEPJz;-4^u$d3|~L>(0M z#$Eii=#`uNWy_yr=Mcy{(d05c3cS~bgRe`cij{WUucolqVugKblfv$lAi82p)5s=& zPOTfILuz>-@|X|LA)xC_7j9FciVT2QyO#}%2(#)g?s+o7Ia zo-)lJEHIrv@e#{uHD2A9)xcsFoT3Kc%~o*gK690qojFi5`yN0Q3t#G#G z!loEi9aUNC*7EFv<(J47HA`TA^vLZbjaZHcu(BYC%WDw%#MqdAZ}`A@YzZ`56M!Yy z2*|R{*k~3Z*w~DWw1E^45hN-BI{=Q$jeQb`!M)%)C3`}Qc7Ti{@_*4}SR9Hx z)#Ww*;^>YGi}}5!Uks>Vh2t060bh6c4s}Pr`&en3_wMM-JN$iTUaH&(jbB-ejy99wk;Q1=(yJ=U zqwC{ZG9WO*73SS52eIMFjV;y8g7oZ~!4f&K#oQQc;vWXLwOo*B7+m*p4HJ?Mf=8H&ThmSx0fElez2QAZ#v=QTgd+BiI@wOBrruorGadHr)( zB&rkcu{>B4s($HX=l~>rQ$ERDo~%e*&MS@-Y_JqFXQ`t@!>u~TRYDi+FQ&HEc5e(J)|=aDtBr%= z#Q#kuz-JBcA-*$b*Kya2gWr>tRnYS( zt4}_cyalOWx$ED5O?YHEH=VRi4)I%_B5js=qkcMN4atsYOZP4Hd3oyAK2oI;<|pOs 
zf+QqBLK#f^Pz5C{+t$J?Y_w|5>QrFKGCJcNj1tcL8|Tnk;*4x6x6XC>y{6aXcXakQ{4VmO_1Lr~ z5RUUcT7xfHFk!Hd1%MD2r#{u^n9$Jfb8%BK_JV|cNOh1{Das%B(X*7{u&m{ZYcuVj=uJ}9eg{hb zW^8>+>mcD5K9i_N*p|uC$}#1VA#DA{1`P@g)yTvqFG;JaZ;LaHCQ0Xs2M4JupIIb7 z((dqyRE+h%pdNJ@UsW*)nP{xhx|<7oy-wApRIw=hnqI0qt@s%D+*|ol!YS{7dR|;< zuh)GEe~GG<-@#Q%9Li8tT8F4YKx+=DgY#5u!BJv*2}A(G{HSr*7^-<(V>kNfcRGNh znYeJ~IA%O=2c7*vS4B_aV^6k=XOo^OvUQYrPLik4#Y%4kn?{Tuw(% zKijd4+8Md#?g8KhQ^-LSK!&&Y(b|F76acz%1l=2nWFFG2Ordkv?L< zbu5DT&)HB({O2MFrKh~VLWPHgHFi9u&84=#JJ224TB%0|@lX6@W%;1^FYA@fSC96T zmurNZ8^O8d1lukTw3*6OAe5(Fd@oEL?mew5m^{i=cJPSZhwOA+3H?2(Kb*_JISlYn zNtea_aZf#8d0?9P$D}^L3 z4f*)cu|N_8h%yMAbq?>22)Lw!x5kJd zw?~yT)5TD_%AL_hy6`%q`@ZLQzjffZ7YCMRkvTzWqIr;%11iiEs&BjmD|5j%K)uAn z7Lavd_|2;bnwvX=@7SYFO64|uAjOn8-uyIY@7ft+<-X{iqtZ*}8kQjsz618mC#NUg zqru;zVwLdW&t1{by|~GHTiCH!o}r>3(;V3|`rwmkn0JTf5n!s z2yCKzYdhbJ$tJ3xP4h$*udS84-9)E}A69#pcxTy6pKVd%Sg%qgQiKBRX&IrG9yeiL z$Ne44!1{~}leV)7vvq5?3D4g1geL`M=5GRJi$Dp;0j1@|0T*#(ASo@Y8Io*P#A{Y9 zyfT5J=Ct>mkRII2ant95A|?06qrcoZU`S_pb&F`k#eUH9A&7h|2bE5iR5yghn!JKK zR4?y2upGV)mOES!8s{QY^kBNTueZRx&ik|+yf8d)*SDW-J;?|N|jt1E)O6v^1 z!E*N9!S7q0Yvl#DjXv@NNbLo_9DgWyQr6$%)UI~Qvr8IE<_hzR{e{L#QL#H7U_umI zG-GDIl_1`9&D+*N`}Pr+I&X5p{r)_^3k#x9d^X0)upe)Q$xEYCQHO|@bWq@AUz-J*MJ7sn<##kbbAC_02> zT*nbTZDl-f+E8h2#qxFLL_J!xs>u9c-T383cQiz$Ma#5?CWFmOUwNl!-ZjlP-( z+37F=MML9c1l3cs<<4ZZzD6r{c1}E2;(Zz!)`7-!mTD95u$+Ojur>L;)jT`TRJ+&S z%IhvWyiH_sn~#fpk&q<0&7}he-CS1V)$(PE|)Z7@g?Q=X?+nST42YW zCM=6vVeu=njo?(px3KhNppi9KU*DCbx9r982J!OJ-E*xk{n#Mv`_|uctS{r#`ew@q z*!pU;C5_hYDNV*&DtBvSKCqYGY2!OHTKs_Dw^#h=xt8_7=UZ0QUWfZPt!H%218mRl zF6Xs;VtQG$YmBRFswKzxl9dz712JjUNj(SNvsC z*yCkKn*#+;5Prs;z`IF5D7*D6S#Rs$#iWqciRRH+!K-nUy7AGBqYwPp->X(P0?msl z!_aIB3;(&g-Kd0$Xj+B^s{fF@d}wq8RT6_^b029cZ!mV&$7J{vs*u9LA~C(T)tS@o zH7bR=660ckbI^w@f*0)k(V(+*02$bsXBJ;;G|94W@K$J;J8Bz5LhP>C47 zK}5b*tHW2nfz!2H%4+>a=W>{u3Q{CZ*fzLGh*Q6T$K+6LR;%BDksJ-jf2G75ntvhQ zBZ$YiP&%DKa)!00%ufvh4IDjOqh#oIgOo6AN;~6H76O%J=_H!>6Shf9We3va;7M)EciW;Xii5q-f|8L@BHGoym;8X8P(Q+_xcwGyw@|YCgA=5 
z9CmN`Ux(cro)dO|m>hPmk70K+u)`c`_MU`Y!ILy%*TCe?lbU&4@TDi z+dD)0a?OX-{D$b9 z&lhIb_P8nYi+{v3c5v`taq@_EWXgn;U*}(}K%}uas>_;wW%e^>sWB_r%U}2uHhx>( zhNWrNWr4z^P+H#xV_u%Pw`GvmjpbX^u-W)MU62~jHE(_a9pO)FYJ*3L8EjYr{>s|WJmpwX(0?N-*CEVfpIVNpk}?v ziG_wohzDzX!U=$b-5B6deH&Hr%yVQMFY&fmIL?@9w~M0E8Jp%G*@@yVsnXz$x$u0U z#vv@N`D|lzyp9<1Mab&btlW=+z;WsgTOb}SXgEE*HJ^B3898z2GUh6$E;{7 zmPq)Znl?}a)jyx$)MO1^HUe1)4^M1Y6in&^wrG()r1+4&bE{CWF~nDcyiH@oOVt=- z4E_42wob&}H|!35h(GE~m%5`xKPk5FHK0l@hyyNFY(_YI^P@Pq!^JTYQby(SlmC79}~IkEtGI+@M62SF80KinaJ-r| z4F!e5hQhTzlqW742sEC5bqMjQiNktFavwy)!n61cnozd*Dy@10ZDq!`)!aVw&pZtB zE{^w^k14!q!yC2p034Ppb7dPD56ZuYqYb>nJdbc?zi=iGi%7phn(1c!A;+au2lN%& z(x?bT9Ik*pN?Z7{S3JfB#i}?j8hQgcjXABu^A>E0XLFk!u ze!;f45goP7#@XUOB$#2q6k@VGgi7m}K{fbn>6L0dT5FuaX-cm!qiHB$gbjsjeJCFp zj~OR4V}^zmW5x+$K^sUM(N%8w;t7n)kVQF9F@_Kybe>W1$mhZmhH5!Aayu(JsYs8j zT|tp&n@Q}bPFoLp+e#~dd-48BYA4%!bkl|D?Qjj$32PIOjZX>KmQ@AoH|FwHa7`@2 zk|l&|AZd(i>^66XPmF6{@obis+f)}Upqi~M!$P14strMcZ7%#UYi4avL)*@jUe1R= zPNP@x2w?(DysFXDlVxngl@yGjuqJ=3yTa92l*dGqxbL4#!6G%*{IwSMJ+9nJ)z^At zLp&rGxc4{{K2todV7We}f1^D~eir$#c;r}CHo`Z>pdD{_VSB>gnc z#yJ_lAF@{sk4Rd5LCHmM1T<>NEnMrFH?^Xr-iX^k0PVaqA^C>e%5G`nwWCpFnlp2( z+8tHT>~vZ_OHS8Nvco7AI!7i#NAkj?GGq-V)`iY%*GL~Z!U@VoYL3V>(PGq)(Zhcy zOU?QCxj|S=^bx7cOvN&D_Nr&*T(5W98t)u!&tF>~{%v|*Kc|*H;K+TIuNp&TL;co9 z=1!v!bnJs+`Qr#T3X1g$-f#W?rgQh9T)?{f*0s;pjdl(DTu1OBKrwg!+e%&s0uKJ4XUaD}1bx)%tl!A?&iL+OJB$Ns0Q`nO$mpF*@Vi!c;h5 z^v}Qb_p6CNN~W1eO!y?fEhQKi(U6!ozs0jvSZZ_p8>ViCl19JTs{Pql;g>2bFZ*MQ zl4O?CnswYf#Vk|#dy>~c0M2)-{k2ZCyc%w`o>Y!ZJ=_`h5RMr~rk|}!__5;w`7uUe zMADE=&ojIo;NMJeCwA`=Z?&|T|A)PIfsgB|?){~atk;aJktIvA9mk`I6F*|bvEzg` zfiwr2@aTibr!Ovjh0=TfIP{VH`QJ|qv}Y1r7}7wM;xeL`q9i5}V5lgesMLS}0|OWs zXlSR!Z3h})py}B;?7s}Dp&*mhOwwAuvj3b_L@YWOQy;0{kCfF=$9z+~_-DI*?j z3&iEjYJA)j63sPzP|gBy{=-IkGOEBVJs>GZ+=oSaRGYNVS#C3SfXDcQMVatPRDa9Q(O+f>ATdLR1<;iN*xm? zv6)A|H0g(aIj?eJweniJa@@H>1e6tYCW=q}g1dEH?k+UlszzA_=HBdK#gXa<9W0;? 
z8$q_fSUDNlq2)a#bB*>4GuF<$4r++!6@byVgLLJE&JJ`FfhJt|mgYKalc!{!EF^$v zwwZ|_0aoLN5yxsZ$S+=PNw$ZE(x!h3GD{*%X^>88k*js4$J#XgMan^PuwHRVfQZtv zvPomiY@GJ{u$x(7_8N;QXDaXT4hNmpb7^vOgYs#c4*Kf?Tqs7jyw!Z;#1ZxlKZV!Q zZ5-a5sHUmr`ru^O4O@I0BH!;_5SW~BTkn9?Y(v%`nO?V;{jULE&>jUDJR$v`wkr}=! z^CV3sESS?7;=z*~z?R-4cau7jIuBWk*`i|@bCo`&!zxavl>Wx)f(hp~GWiKR4UQ?B z3?LDR2R|@V*D(gWUZ+d1jP6y0Jk>4g4{<+nkO~%Q7Y#8ktpMiN^8d1|s75GAO!{bA zvM++=BUs?jzK`OUhr?agiaqVZ6vYmKU0$#3tt=9tR>DE8L9OTkYXaJr!Nma_6~O9r z1)z3k#i*!I>Q8eK!f5YF-bn>D?xPl+NYTvqW>NDZA64o)({~lgINQ1z-;dnUp~G7(Dk5}9$$ zaSKK;fChm7f*=larU@w+FEpKUXkmtgTZ2YYXCP?yuo56&JiMG~0-uqo=PD3)TAZ|V zZ!6EZF=n?XVpqp?cxy9ni?o8~m}vzeQa>U|(Gnz?kpHo~clY{686wHv+&6p$rQK5S zqX1r8A55ob?xbaQxC zH*V4T5d$SI!J}-*Rc00iMPFpO8I)*_i@Iv7t8eINo+Ld^rfb`fayH}Su zC0kMSPKlQd3cW^NzWyM}Bw;c0SBbpP9Qiyi&D{(RTi4Ru3sQ;s#%=_E_lz=pRx0Zv@Z9h8yib*$C# z9W#h15#LD;FEEf2|ukBh9%~mTqJO9(+I%dKEq!>!42pDWBh-? zMPzur@}_+Mb~)@xJ!Dte!WMmoC+4N}!tV|J{Enj^-~e}T_8!jhC@0j9=A}guv?CJ+ zhS;z0`Rnq-rLTT(=(kdT^}kBLbeatJ+%2#(^_!ABd>>Mh8@`K*^6m%asLPy6=J!b!)L|pEYau&Vka6FYWo+UO=mtUj3hV9H!aB&k$xf^wgZ5 zcIj!Go_6Z#VmwqNj`Wbg`ax>S?>4cIfFMo*)4a*w+RHbY8#i(yu}Y8|S^* zCU$5;g_MgZ><=0S**A$vR*bHtj=A`Qw($m@;Uz=*7A^Y z(ts}?=!x7?*Xq!x{h3v}czAoVpV!Mp>!!%3@(|zw?#Uja2ln1wq-e5%ik0mIX*mH> znP*g%9&xAsph|fUTlrRC>-DgmTG@1sqKx0N03iiDaCPf&nNN}+u$m&+NU$TXXaY8M zTkk;z2q(*X)UVfl`h(hxfkC>ZJw}p2f$ScxL5>03Oa+Pa!EE^bzWbCBiNPQ0UD*MQ z0W-8H8lF{p5Wb7O78E|D-e*ZO*F}g_VrJ)5cAm1#rn=19#4g*5h2HjwUF>E2YG%TeX*qeA-wmuoN3xZDsw!I>8tw zVFHpWkko<12(O_B4_;H?6{+LF`(_g!vCe~c$GRly47^!uzUja#u5;kc*~>-*-t4;U z^$x!I6%C+htux+HS84PIBQH4x7GR8ikpa@ex`WyOEFJ7lZw~KA_yPB|sYM=wCHevh z!6F5)x0&GMD>@PCmeyUBT2_TJ6*R%iR3Pf#Cb+y#6TB@s!4;LQ*+44l!X{Yg3VW*h zslm(D7Qn-8y{pNfX{}gW2mp_xf^roFRW&W}n(O=&7TuW_PyYrIQ&Z@JCx~^-_EArG zDiu@d%fdPS!jbu5+*CpJQ*#zu9*=6SPougpIjVWJv0z=#TSG>+WqU!Y9`Da|WU)k9 zk;MXiFRNaKVt&Z?{-=`oWkbi1$oP|3v+6LYT9eEA#J_+Up_es#xx}Z9x!UK)TwhsV zCwiFF4v-oNkeWP)I0X9SmVjod;DM`tg#FTih-O_Jh{vbFrg#-*#lcqW_cUuS8xd@? 
z{T{aYmGzfm*h1(3b@*BUTM@n%?TH|jdj#8Z3${P~6zeJNVLP$1?XqiIpE%$=Y!!RC zz^CD@9Klu%S2oaa4Rj!`mVm8puvPk}kD7F%yvN(e{|3Abde}~{Y`Va~2BfVzfM5k1 zVkIcr+gb!%VZbA9c3=fjxSU?eNrk+M130j`SC2J`I*c`}2PwS^dhqnOXe*YUJgBe# z2520P=0&aJG!Xhj#p1bpU6U|bPM zq-FuQ*+F^ZV3RG_{_}59{4;)ZPO9c*aFm6Z+Dt!#3d z1snP6V%Ta1TiNo}fdJZafVLG_p={k7PyzZ)IE?M3;%dX?&~2+2!E)cUs^A9%cgJM zI-Iwe8pwVHPQ+QAj?R>56_G!bzc=+98WM5er%=tMq2NbM;w=r?wP~yVkh?cbz3e`$ z1nzSCksFlU3NGTorJnBUq{|a+T{>9-wc}hX3vOf`MO~r=oh~xul!s6lIXUTmO|w~U z3Ux&YamtnMH7%sB_U`4Vlvz3?zls>g z9^nVQYY)DCI|>W6&P2`;+m3=E&a}3pfUj1%@fqp;p-KL#(TPLUtFd*0(?bW@A0%A2 zKRA@;psTh+`t{~v;h*E0uM;CEppAClt|y#U*K`+o(#We2Zk=*=YZS(Xs?4U1=z|6` zO|lDyd^jyTE`)N;kc+5dR*7lU3DUh*rkI$z5j-%4`ZnjoY20Qj<$*~mOhY9Pj;UueKDQBD158^z^Toa z6}2d8TO!;Ug@8}5Ygg^Xa;9BrDN|OsTFN}Br=of_is>68s!i2?Fq7mB>My8M6hoz= zhOtO8j=40fpE94ee(D*|m76OYm>U+1Nsxe~0VIWCpc2g$_29vqXSJccHk}A%Oal!-omg4Z)s)yiWNZ>U~8w;CTR@`A*aY@KLF=ce5VoLc zzuSUp@Is|#x@Omt;eccX>*E!6KR!9P!qj|`oA%~tL$<{{-&%D?)h4U%gZj2r7rtMQ zw(OSlv`qT|AI!7k;MUg(7q*j7C^h6|wkXP~P@zI%dCeKXWooQAO;{13=V>_V0iQlfw z1<55<9HETdkf4;$q;)f^pGYDV&3v0GbBwdawvf&BpeBPwYsK2a#W3Qia>-uKs%c?o zal}*Q@{0Pl8b+>7mDO!!1jiy`OZBqku3)?mlom(Q8;K3V*`p#ge$xP}1b#*4Tg?Wz zB{{&l$~Fv&y0vD8&MZ!9aN`nU5xC(NWr56O0;4g)0@X#6*iq5w-}DvJiM^q7gyc7^ z;j6=7-kL1x6I(XxhQDAh3w+w}=d*tJi!1BPZ@v;F!YE>pmU+g|S;)FYS)qbolz+Cs ztsx{;Gq2DCE8DJG+spdI2=lPj?4`C347O?nTYW`+;iQ(mR-68l@iJ@(GL}$Nh-#Le zKbqe0$mRWWSCzq6JX&#o(>aU#H@*iu;)+Kt?Msaf>2+Zd;svJ)wcwNZi0p@<5KdCC z+tyAsnvWu0a>5OuHmZrDXQ>cT^xUYH>}FB4c=Wu=X&WDUQzu#RzL~DYQ4YmBLRlDn z($DrRt615Zb2=-3E?fCi>$NWfEGOGwQ3pYFR2);QL0=WMQBfO#lY`~br^nLMVU1V5 zH8%CxJ*KgGTVtNVh-h?m!F|pHs^-XA#HV(mT8yTLr~<~!FP@k`^l_#law8tzkVF|R zBbVo_6>E!Fp$%@O&D+bSnzjYF!LPK1&IW8b&2M1$Y6JZ$hz8ch=V12zHc14~Bqqr? 
zo8n`vRRU_B)vLW##!Q&ZuubN$^c^cloC=KzXT90GxfX&`d~(W?J$VC39W;j=96Uw0 zS~t}VrG5V`J(ZfhvNZZk7#o^vOG%aj4o60VP3&yNDssy5P-J#S(Pqg`Eksiv6T?3c8vY7tcc|4y!4`wNyLn?!BoV+)sGYa8fZ^wyw~);K$2 z@fvTCzNqWxGnN-P)lui&lf$GMf~?L~2{HI>5B?3e(#i=aSv7Xpx`Fo z!&l4Nb1Z!~?tuU=N)YzsdLY)|Q0d8=(o-S3$^6jK4>^vGN*~rewKf<%M1$cuSfEzK z8Qx4+eO}BR8&9rNAy2Uad}@0elrM~X4a8o~vLWtAJRYearN0NNYm)E?fN}zW3eTWo z+<6uigybQ4+kavmt}9)!Ila64^QB+hoSp~|0X6{@E1}YH%oW3Ku%%fLZK}(n5viL% zqF~_dRPjCi8UE$vi!lp^w!5(tmmINv3}oa<|+TE#d>~|d=8TsuR#hY3~WzY zr2_Q4-($GTn7PLkyez@X&*t!O++6oT`&eTyfLB$Co@5#LAj&g&KiuSBnc%aFGu-oY z>#wG_p1Vf$%0Gwjp7dx@w=?ACDlP+(j5K=1$T=+bj-4{AZ?`F3oN;-#X-8(*PLBe>S?_Z*E-M{p*5k{pFqBB_2}{*wRwwq*9bp>6YjAQ8 zCZW5~iseR+>KUo43$1AUD2*fBGqMSv$8WksAhUrp*_Lpd-sWfW!^*i0)0s(XO(wBK zC|7r#v|(~MYFQ5ea9rQ)RWI?ahQFY{<@*{_eBISupV#}_3IkVleTJv_UaJ#&UG*R$ z$aJq2Af9p{_7b{J-uP7@W#b(_7^bQ-+?4dm9xewWJp~a3mpLdNRPS8ZBP}3UxC{)~ ztwrLaMbj9d+w8xYMRDvh!v?qCxGSnw?=jg^Lx>$0oX*^k{&dQkYOxq7!}G#!+8B) zCRF?f&auO)0lchFj95R4n!Q}&lLd^!DH=sRYBXG%W=&r%T%H_;q;Vqx9mtzJW5(=t zdnZN8Mnm|HE{%WWV7 z-`wS>%9nf0FX};<2mB?TgK`4u4=n?AwCi{>>hEkgsu8BR8dKSbtsp;tlcbySUJpp=yXFT1PJENe0=@4PQZFlm}zBMu5hO?sC9t}5y!ag^I zS$kQ(oGTt|48?tZ40Gq!svw-*m%j(1ncv44^svA~ut>ww^5^%pZ740S(QKFyG9nYQ zyY0Ik@e}b$cAR{i*!9C30rf1Hb#~SQFetv;ZAz8b=-PVT)uK?e-F@s;YZC{?c;Wg~ zcNfvB_qZs=?pK!n;NtWJse+lT6MGIMv>e&0Q1$`MlTKVyTcT|w-qv~UuJpF&%MPR% zHXsHt&q0-K9p~n;;4}1^1F`wo+SOT9(Ru4(kse&IBr9K>frA3r0&ttJB99b+^_+iWzj46yoOu?Bx$`&WnhpWc0tokzChGkpP z4~nOvu-8hTzbw5eYX8#nXumZ2|KVBdbDlAlG@w)tL<`&2G;hiWTJ989_VNJ?@++NZ zY@TmQz+ zY}0sQi?tPCZO${+JP?X5Ce?w7_iwU4z7#6V=P2w;H`?GZ1;>YHUaE` zeV;1EY1FBQDt{%(OZkSk_Q?lEf-^sT%v;(cH)ZQkaqoLMA zx;e@9&WKrU1|nu2Bh^ElMvBd;4`0PptxMtOIQVq&cR0@;65X>;VLq(}!^u`E}j zm6wni(>97gy$!Q!rt4u_af*9B?bk-rtqm`ccPE!#r$laFdePDtr5CyTTVhhAE`kuc zV;MbZq~0_lf~xTWi^RnT+C+AXu34pPifl*M5L9R(#EQjGf9=vb4?!Ck+VE3jQWd^+ z8;Fb#3uk%T0x6t&c9+kb8<>EeFwHj7j|T9|j;(vI69 zK7MZPaE@%h)zjR-g?CDYBb#C)aswuzN;WyZraC6b-XwY+Y*TsN_ zN;I0iGldattRO3||IXC-^a#1~Msj0jJO|Y)(ETV1H+LHkmD22fj2re5qzki?pb#Vn 
zNq&;%Cxf~rNNfYC10qtLa*Ckb?e2U#LrL0Vrp@2ZHO4j9XAO|TF_zEQ(_~3FLv1#> z>Kx~=Lo!=vXOk{0;H)NuqfH#mdlzXDaG*F$Jk|0Hfr9~uip+GM(gtcCf@V7&dJqE+ z5$r$sMoEInO*xch=L90-M%=fgUICZqw1wzP4)KCWzG!lxz^5qDN!{54>P7f39#9zB z9BUpQmm<^nn4|1lQrUlq3J;A$+q^|_Kh^Q zovDY=D#b*}ZI|N+J(;(-hub`x+RZX?EM2Du9v{U>|vWG^MJD)-88A+=+q?7Nmu{cw6yccXOI4}l=_h}f?a{Su5wYJlgdVx#tXx~_C2rp<5MY|3p!C%rPG`FpO4FBusd=@|UcX^#o?cdB zD*FSu*L9fs-0?)8=YXedc4GBaQo8Hv^wbS&;|X1M$eo45W58C!DA=)s`ter#l7-Vi zsl%loT%Eq?Mt5kB>1zd^-(%?$?;>(7I?s4`=L$^BSyu&pg0h`{>%v|(BT(md zMq3vv>o2QlxMJcK0V4uwiDz~%1X!8U94~Mvs1R#7xjtw-Q{MdVlNl? z9AqmC8$Uy0J z*QNU}Uf6Lk`fr4^VA);%Xi=Dq zIxvn`hZ+a8M0T`o$kcb4m>kk&Exj3)oay@JE-5fWv0U}Xea6z9))|or7Bs|oh)UNE zuUBccY)C*=?_*A2dQbfo({x`+iRajhm)$o_90XNuZOm(J#0Fh+7yYqL8oONN&F(gt zUbC|C2Kk}gw&MYOeWj77r7wJ}SBu+BcoZjW&ON~7OWFjEHxVBBYT5ha3b_^5e4uJ! zw=yEZaArZ4&844S{1;3Dxsi6e4WHuf{3gqkFswiTbe!QEND_rwMPujy!aVkd3vr#_ zoy<*Ov4BVlr1EZ&L1;0q3%jYdXq6ZE^r}O}UM%e<@qXJz|1zc3!s2d%4pg&B!PUg# zsonXlHjJ&JQgt2Ftj)4M+4SC>bs9$9-Y)ZL;-|LTGv3C^28x%G_`eODW&)hTr2?!j zpk^uPfjV**YcrPBmnMM<4XQcHq66y_VST9=*7mX)0X27NB&^#Su=>jsZ3k)*Kq4G1 z8Bp_=I&XNHvK}s?32tgd44=HTbE1NZfv}`c#F2-qYA+Z0G-OpGxN0lv3xMjSs5X~A zeo_h!i$nDZS$AKTlzP-G_DMh2Fa*|xK;vb$V+;drUJ9|ApRfTcv%cjg71iO1IIO=Xi9XafTO4=j8<%lW-_o*(#QPIHk8tLeY;`;a3nEs& ziv$SjR2E*zJ)O!h5%lcpaGU3%YAjJh3dbTfqO%-JdvumVb~l{M<%MZ#f2*)nY_oZR z7KEKnnzt9lt?kC<>{cqmiV9l^^X55Npjd`)j7^1kr-<|Z!cRoj=90BB%crro#jT$0 zE^kc&ZhK6%72VOx61|kwqfEF;qU)-RPp2gP+Md;t(P`EJH5?Eyj#n&d^${A zh;j6sXUjOMt=8w#g$z9Evy$kusy^$jK3xpBj{tjU<%k;AdR3ofAG**Ed8-AiYyVAg z#URS!iiNN_5Ei+{qA23~-$POX|t?qBbZkUMih+d%~s8scs`$ z*8+Klps zuqqYVAQDV9VEQr-oJY&$buH{5UT#8&J3+IUZ3C@tk;JKDE?rgtHk8A-x0@V{~5q8@8#eu!z zQ~=qg#J<-~ijW_%8DfPxUwTb)v7n(~aT;ncD=;WbYa=@QYKV_KXVd3OZf+-Q zahkhqyENDLPv=KYr@I1|2rmqlqaL<;8?$`QP2)iz2pugSIYOSY>1kv?UbAUSz2PI; zADCu-2VF#aW!lJtJb?%E+Jvu66ECne`+KAtFt_{l&?EUfR0!jBEVuOrDcC_32;q!c z2e~nCyw;}kn{1)4i9e9MNvw#yTxloFd39Qbt=YA8pb8qa%OSx!UtUd_&70mbeK5bN z&5|tSMWh^~o`&>z4(2H6m_T~~`;mM;e^#*a5m>=|zO?t}((5^Jb?whV3r=n>o?jak 
zKxsiM(#{hmEr7L9mJ^Vd1$ngvd0Wi-seC>>AJA)ts6YvL=_*WOP2)Hx`~zcErTYS&)mkn)U+djC+t_+t}Fd zUrw?QUV72Nd>))kid_^^yeV!!nE#DjKG=EMl2!(b+Yjb`gA$vv-yD)8xPQBHiCO$6);aD~URrh{E>-a4iNY ziaO&hsM+HJq(~PulIaf>cGm0XkjYc9WGq;@HIxbuY=`zV)uK!W&sW1wVO{gqw0;&@ z1YF|kL-~srpW{*u#m(Uam=W8rB^(1DPIFN{9v^ctdvS|0vVUkKarokMB?+R3j3j2M zp(%o_5|J0BT`x!vuT3KN{lXbZWXfXEX1a3uO;KdRoWW}SaHXJ&Oqe@YY*=jAOxGV@ zEV*gt@s3`MKm+^NrsgGI7ZRA@&-sfFW)B-ajxjn#C!B+}3SCbE^19rnWQ+oM0qw0A z1%K}jp=E@ajYY8v3m025uEmQ>|M-hsvr~V=FQvE6thK;m+W{|;BHIipXIYak|D`h` zT=5%XB_~0KVWn(ZM>|%^VC9btD-WDAm8fAfcN<<$P<7+$6S4dtUCOX!6WRmd_Nj}tmV-t1>m$yYy~9R+|0QQzA433QcTNk~;UTwB}48vR=IdF%c#s#GZ|vp9ARxWsfOhz=E#q ztfA`USy2^XiWxTJs(guLG()W<5RbL|52P-Z)%=g-0<_VeRdKWl(7pn>gD%mcM|iW{ ze2JWJyiVe4CA#907zDoL)7P+*tsnLd&&$9G36PV4LvXVb)1X> zw;^BWM=WjuV{=<1CyRVK1)W7qatQshz&qwcorUpQXcf(uRkmQ~^Ze6Q7OmF?j%@LCyu(H0*7H*(Wdgrh1r ziWp4fsQMcnF74yT%9hAcb*dB7=BUzSRQW}2DgyXoHTg`2)z9lSV1jsE5uFZ*SJ(1JS-$0sT3*Y1di%F- zFDe_`-SkyX&1eynHxk#PlQ#{D`Nh}fD}T5x7l-9{Yg&snec~GSxjYKoHqbvFRC#NQ zuNfutD3*XvZ}s3)q+EQ^QlvcRU~I7$xZ1_Iu`x~?x2}?c*b|tf zrzSmdMWySBdthU%a`_`HjVgXDpDwUkqNCWyR<^!qtM@Aq`pmDT2f|ZA4eY)u)YKrMPImu=zfcl&eynoYh|6yBU?&yE?7qe? 
z33kt#OSDKEWi8=WMvaFiLu10La$lQ3vE50XwrUDK@>M)6c;V{Y8fjB{TJMo9TsOlv zo_`Eh?UQb~y6aZG;#LyB6!}B94S&J5NV%d!3DWw)ge-~qK0UYizFl5P)RcmzMqc?O z1O1%Pp|)ehup(%1C=+)0y!Xlnuf~ed6E_J=_(0yTGTxU5?366Q3-`_n$~H-2DPMD)U1s%wCYlQH`c&iG!rvj-`*r8 zqCYsinSRY#6Wq~yJj5danhysiIIA?>7ZK^gcWwtZ-D%;Ljkx9}3%1Xvwab{}X0hk6 zl-V4Cs?VGTkV{8eNfkQ$f?4ku%mAT+h-|m$5hen}A%bir5g?;j)bS0zoBr5@T|u{79_B*T8ix2o>*$W*UZ@GWS zjJaW}(}9Ois!nuBn-ZL%3l>8veX(anCAlOm&0<59;se^IGcV3Kr=w*q#H~2Sxg6&= zhA<+Zqb1YkFeNM%Qe}!^AC4ul`TJLnUvGvR{aCy1FMb`fqnWNFo3#eyq&OzZhM*TV zKNbW%-0tX(2s-TuI`fziG(cNsOTNp(Cg)MTe|f<$5@)ugfMvE6dT=^F^30YQd2~qF zdlo_jiv}3qF<}4xA53%XabBkhi-C;jkhgl@p4qbP{5~bf{{?;6);aW%$v^_b@d5sr zJ~}0kC5$(bu}rru0ldUtEzt{KF6v%wF}pIqz}6Ft^)S$JD@3^YEW)Dd2F9nSNBBi~ zbM^tF$=aD~yD&K0ZQ1NItiqcg7Hx_fKg7ce#fR$xu}K$^=Ea}&&Dgwj$>oQeSJ=Fw zds{MYKe)M>z+s(|A{OKfjGN(G<}{tW&%oRM3f?9@jB`)xrhUW1?K2pKZ8FtABL&7Z zk39%GrwqBBjp1D>e?ni=O`GhEND0d_EM0AohtiO8XWyrf*+}6&nxivui=) zN*@G`>`Yw%q2#oSp3lHi2yed!9d&OvIa@Q@02|I_$&Ge-+77-=h314@i;!SEJ5$H0 z$O&PUXIE~Y?mxbOR^V1IIT-A@iC!2&ZAMZwBi=He%RH3}w=QsB+bKAp!Pnz!7BY#D zg0vkv&*0)B8PG5Ob~+zE4}!#fF=%o9K_TI0S>2J)3A=d^bS8kS!nVMPod2+}MIHoR zcNPbB_ZxmY9fOziycO)=vk0|dr(F%!vIsp5OiY0muruBw(RB91gZWJ4>!dtmlcTq@ zV_U2nN;m#CX9!6Zk<_;3=}02U~VEfrZHwPWgbdIy%p@-p|YmTKc%du3NeR+D_84mx!+0HX}lWX_KI5c`Dc=`De zwY!zjnYG(3-Svv}j_DSCoZ$+QjituNGySD?zmwh)R@35PeW>DY_p6#t2D|q{Br{!q zC$R|!gcCl<3r${4)Kj;*+Tgb6rW!YW+{=61bm^o+MoI$uGAEBH*+`=a@=Y!b8K9-N z{7(8eyDO#a?*hwx#vnlt_WqERad#ic7^jq2DiNp6E?6=0W|(*wuz)=&Rtm*s#f#{* zts*ds`qmX8_`tdECiT)=?9@1>W-!3>QD=4rgFL?_b)(s!!4QA+1?iXG5)=xrxCPal zHiEf~lSakN!R$jYVX}p`UDDyQco5-7=kuztKn2+a^Hhk!br(wM@R0_hbM@@TaJ8^4 zzs)VAX+!&!Y&8i3@m4cC-foLK76tQdqCi$q+a~wgd)zOi50JcSz!ria0^yLq8di`X zS=u7YiibFz8LJ_-EyfnNZDO&-oz9e=^PKe5&hy*&z|o+M4`V58J;6|PG|Huj6-WmfP&rUBxk1zQA{|;p(m?&FNa#hMFDYZ!WR5zq!O#&FJpj@vFNQ zbdfK`=5_n4@_lz>?tRW~=svw6KQO#Uud@fQ;5|ccZom~EUB}(zGGz=+?giFG1K`uG z!gb21K&7$L$W0TIx?y^qTsp@*d<$cVYn0`E`__Pg~4Rf z53a(LqyfO<>gEjxWhvUN!pr1RuWVqI=>Fvj1zqZSU86qUmSAh0l)#=jP!qa4ukD5f 
zmpl;qnjZ(VfTg>v6&$rrt7SSY|wLX$BnfzC%r8Y!qd`IH;xg zfUm$V{@64avi$F`1K-|5QG12)w)PMsW88GF`;z@_RFX?PPw&)arWrO z_ySv}YY$S3L2LRKrP&O0WJumT7aweddG*E@GE7vKtl7AnodN) zBs&=Ax@4wo^*A-R_o=1dXrfx7Uyb!>!TigsxmI=~IFwgT>_t}=JPvqW8?8T-{gIcY zu6x4q`ta|uBlJU$8(v@bn)HP+Ub%_=yz%POkDOT!U3SJbc@=o=pjIWAjaK7hN~Z7l z6dO8^3z>$HsTjKM9x$B%X2tzj@aQ|lT476C5yf#C*9&4u+OkBnNPWn@5!7=p;G`?a zO=-%)wl28v>Mk|`*4fsN6cMU6q=+ojhZGd{AF|&tW#q(oNL}!(PL{`!!x9En%BhPA zcWlhj1zR8E@?hkO_pykWv*{Z>(C}Et>~*urc=mP!{P2{6M-m5DIQ(vQ>iXf`>clyi zYU+f0%&40Vv$t8LLnF~33M1n6>w?+@wKF6lwR+9z1hP9B=a=Mf&h%)5Qc@sMqb+BU>4jDQy3EL1??cM!~$0i zktVWU7u!q%o)D_~V8rp(GTy)U8Ue_9+gg0GkP}0`Nri z@Twt_At7%7szyjS&Te z+*=ouS(QyqZmdHuL3F|s1SfQa8Aj}}Y@&Q; z>L`~#WdF%lOPwTq!#gy4;WyVCo}p1n_j51?t;bDt={XIP49!-?Y9jmFn*9 z>FJV^xrq}am|VGmTY_zn@lHjQWA$fWEdr8@Px-^6KQcR_AF;70j*)J^VzTM4TZS)@ zPfXs?NPr4I^DH@I_d8QR+|o?uxcz2sBsYa$(+PfUak}-%nV(4e8Vr2+duimKh9>vy zgn(o=G+=8KZj35%tu$3E7wtP~$Re@EXnwx5_^Qk}3IE7GB>L1Eb1bB#W{p}RjnZ#E zm>Cb%wANLug>WaC7YwU)_U|8a>0C_!q$))Y?8Y_VT$-k*|^%)U{|49*398t^|IONo?Gep1szZkkDTl|a6vtnPsH$ZyBJ zA;sZbe6^2O(Rf9ug-zEFZ|}sjg5ss!T1HrzXK<8Y1oKSx`iVD|vb;j^&1bSNcZFm& z%a{l_nucjV=nBO@7l?co(#*!ddh@devkWZ+;Ae*Ov_leU&9-LP&7Uk(SQcJySmP4& zr($QDwpC$Y_yH!^NMyTBx5fef8A$fm-4dGy3QuV9h~BTNoN$Xu#G_lQrnv%SOnp@~ zW4n*Gjb@-cLcK<(`!h^&cPJLcl*^)MV^LyBVo|da%&@4!h+vZrEEXk}B=f!~@y)m9 z*Ysjrg0a+9pa$=cPG*Zk!r{>=aJbEynKJ%V2~9%E78YIfX1{}U!2BC=9WfPrh?3A5I&rtOE5zKH~5 zNRl_8c@u3QW)k?^h;;#7hBIJ-b!MBZ<7#XPA|}W!wKcZiByh0Q#wBe>>umK#TF%e{gYIN zOR!DiH>XO!d@B)&=(#$Kp;f1)!U8i-BFHuU2N?rsS`L!5y(}AHoQP8HyOit5R<7XI za_tVj{ZWwFm%YbU!ckVH%GmNE-^AjmZwiu*M)d+8%dT?ffk*|P(6GesqG2sYT4bxN zRO;T$pP0`w_G4N!W$X|=#{nALs|+v#-W%Bt2MNGiutac|yMg#VMh-)6LA)`dM+fqo z9mu6W`1hfSuqkFGG8RkgK%hhOc|f6tY|1`Kpql2;p_6avuCFx$Nx%hI`_Z8+H4Ugd zvI;8a?Hv>$kA$?u=BTF`NkY2AL17xD=HFRao|I7U9Q=295)k8v6ujbOzAskr8tkvh zLt`@_6qthN4geTU06Y~P$q$%{M}*APUB@&)adY{qxQ!Z;ykn|GZpA6cO$$%lvs>BY z89)2nRzyYvNqVNt_7%5+3pypq+H4d;*xz2o#eLMc+wvO2$W(lg(_v{fH2;HW{4z0GY62}>JpyeR36VF#k1o1k5|F7<0pzDLh9VCs> 
zYfMwPf0ezMkbM)YgMQl2BAT-{m)h#@_w|pjQlIL^4{(30ddaFUw^hHwS3j{@_1%8m z;)ziGH>A=8r3Rnr%55XwVheBnh!*ucyrC3)b9kck!8f8{FKP9nWx$T3Woy(ajvKuB z+ZdfQk@s1vcN9&P1&9Y4uSwATV=M$yR8}h63*a3~cr=odrsXV-nj^F2X1>swY+CDH z>xS@Kdn={a&ZVb*TpUqzI)#NJl}<}NxHR%AH>P5DYs-P<2nWKoWtz|{O5d7G=U%cn z)iQ0=xGbYpdwILe?n}KRurZ~vqY+G@tq-_8iIy5Lpl}Sji`4-55Fw=-OS$`p#!El9 zduT)Ho;Rj8y-iy;NPTy!WDK6S)mm?ZB z7N>Mf+^YoY_+fj&D~IOe=CM z(!_qvoCmOx!Qo-U{(6#s-kN$g4BJR=rT5>2lWVv^!||Sm05{f<3(sj&gYF)Q?hOAG zxxzNJ6=`k~zK!omgHuQ-fhi^dbdQS(&zP)rxwO+6=G%1U!OU=^GaGGd#dOg2!Xzld zVW=I5n7kzKlR1vF7FAyzEpfF}y(vED%>N>IRjA1A4YPRA>$^)c!wd5r50@O;2rxEX#X>2n+z(rvgPZr#o}nDWt&Xp z2TKpUIeqct0X7R`O|Ur<<2Zw~6qsJ_c;KNiHmN?sGI)7hXMt&ae!r5E3o z-g;K45~J^L?gV{yfD)`BRURGohPZ5uP>(b6!^aVc&ye&Awsz3wz2iOclcnJy0$;(^V`_hBKD{jtSB*S8EV}8&)L}ZZJ zhz>Mh*A0{^|GaKvn5GokkL>3@5<1B#gT6v=V@|ilk&%fX`rj$=p88-{;l6x7%7Se9 zf{g?Q(KS89{~0%;F=&N@?>9;0X5^;M+(e!)dc290%VYeN*Y(>ZWE%9&fROZ0(^?){PFCod&E=(CdWLwZz6XS zD&Ea_Yw>UXFk51IJNrMV*7BRK&!w-wWw@K#`5dSEyKa6uUtK>L-a=E#-vUIghupa0 z>elHT*5QqE)T?dWN`ojGxr=E&)5R4tkv9?E$j zIkVJ#%hg>;7;>0MUAe6eCZ;jZ5n9c=!H6cP2v`3%r^If;i^(p6Ccl?pj~1=5Ov73LFo5k9cweN^!&G` zr-p}aMz;G|OP7B4tvHMXsLc}a{-h>JaZpf_pQPbr%{h!2!5`iW{-^^tsE6HoCcb0CxH@*E*bAh3-I~N#; zbjq{zk%3_Lf1^0>&c4&yY?PMoVoo|t5fpVZMOq@4*jUXmoioM8<}}Y@5!~xGuH_*`Xv8rI!me2~_V(s9)L}&8 zer-|2P%bk)$b#Z$jsR*k4_ptq-9ocs2H?sFWk7^-{jb6`2&so&Vp6$bc%8b;p7YT>odvS2c$n-|iPeJVwI z_}49@H$PbswF4vwP_Nq7C2~wWm=~FY(mWMOhT=(!O4-4k@XJe6VM%<{$`3>I_l4|cu=b$|qH zLv(7EP_ zj`zIvH7=sB^qkGZ>z-jEDrS#^ia>r1?9o}Ct9l|{-Qv=(R7r=J(!1Z59w+Bif~Q+T zSryT8Dt+s%!&~;wvwi2#N|b#9t*muJD_)ltTB-E?#_$AJW_{~BnQ5H#(I$`g|Fm}1 z26WbjFmALCH)zBes6&Y)AmC~u5deX`Ddy5S;hmrQVFf)vv$CHW?4DJ%d0fpNwEY(s z$>1WGhZVuhek?m{cs8vJQ4TKF>0zAc3SEezjAvVM$0F6D4>FdrbR6ffNl=A$nv){2 znYBob~tQ(zI;J6aYIOhE30{fyYFNHk0 z-KUR(ijOCEpPIrAnGwExEIsxjEqEPgiWa&cJ&nB2W!$EBv{$6i9Vh(R0&^)(%oQ(0 z2}%VUIlxa62dTpeA1~eY&h%LKj?x?AA1qov*B|}x zz&q1BeyXaiWzHm2^o<(ufLJp#WCLd4e!$hv2%mKBMkdh#dHwLebwdm|Pf>a_>SwI_B+H5<+mXG<-Z9f4jxVxhggLg=@+6G2^LoekQ&vQ9X;(lMc;8h3UN 
z+r?3#sEzI+o7SEt5NjPhs7+^xP20P+a4>R1Q9e;a+1rOiozdb67N8gNCHhr#zi0;r zWIj+uQj0hjozBRDXiRnhoz6Xlxr#PM|JjjdbRxn~{ja>Egz?{jQt(ltWH`CS!9O|h zgfTnvP^YW@Y!e=Sv(csLh$cM>(%{WQc9=Xj+3!Y01f|n-(fb886edmAJ484tKMJ`f zBU#|;!4ix2x>W;lwQCc*%$xwTJsKu*MTEe}Rom9c>2 zE>y@C8oPNui!FEl&lqDgLq=AEc5-N!&)5|=E5--Ch{wfx^sK;BE#Ukg5uk8%1C;N& zZjQB3wcyV`NeoJSzFkX33Iz*?z`f{Pv6cj6C3xe(d`R{1Jw)5N*O!j`S$gwN5uE16 z@(=nwj%}d@O!}#5?PU{;IyGGuF*$*IZaiS;CwvG&*+K{u9>8+Pw;!jMlop8;EIs`_ z_$9^LrCm67sd8@Xk|Q2{B3L`vAjUvJAD~$8U<`M3JI>&?ahhW>YZkH_V=f+DZ2A~J zT3cRjDQm+a-&)HlP@T%&Cu(;r$*+*o@crrip(18d!SazeWD&%-4QWXqQoxKrRBT`> zSTZOz@mS4c7TK`3+X4j@Bw_r$HWBn@4-G|efT$RrU1)sNbS(q)MEjz@qdSy*j%0N@ zcSlk@eR&lg@YVKrW-dP1n#!}QziqMhy-zCT?o6*M8GHX=>G|&^Lr|r3>wD8IfV0My zIOnNV{Xpq!RPB%qr`kcaGlySV<=x-$j`OUNhtpVQX;7~}aJszxi*Ntp%W{)j&_qND zIwkK5P2(#YV%4=346UTnQnH6HD{8CLgrwj@te~H(Cht%-zr#f3F`G@rE*|-Ui3aCF zR3R}Ki-6_RcvT(xHfHkFuIww#L-3GdV2usb*J~KY&IW=aQ9W?zhPB)DOfA@Ybyv+; zEvg|AGbc5!tKpjZ2Ha)+2AGTb8%FB}4A1xQ9nVI6EoEbMvOD<1UHDDc`=}h$Bo7a2 zlHk!Q9te)y4LtH!d5{+TPy>zxDmGp?l=Anmx(1VtDTQ83DH6^l&K-(Q#m!f<#ATxS zpeXw0&Cqcpt}39;Pfdk=&O=hQ5d9X_hYfOCA_l=|`=3mhg0sbYj_8~BcxXaHgXdNyzTOtwyXs2{P3q@t)2q>1*2*l&U-Z_wIn1#0P z5%T!HO9(|~3e!?m%ohgDSy((uu^}Aa-_t_~LuGK3|4;LN-K`c-x$wj!(a(Oh>E#9! z=r^yPHZuXsym~HP9&6T{5AMXor=&jHpijT{4cXwmHN0+5&Ytd*KK&_8%hpKImZ6cD z7vmwHD&2e}GugeqlsPeS!Icgkkbf1fK&b+p0oj=E#=HW{*K|MV%r<#sYI2bTwZ4Oy zvbC^xeb9qTqDS*0s?zYfbNs0gcGnW)>#>%4uIWBi`rKcpFS^p$2{>G1H?ud_gK9=? 
zoD9r;6yx3f!SuH0G_=HRK_y&57uuO%icPjE;lz#Jr z=Meo6JD*69VXOYOzc z>>WdSch)+>yenWTX4NoX1@oT}<|j&j@&WQVtuMXf1LMQ2%1l!Z@#t@qirP&u@$RR`qdAmw}(c} z7}zW|OD(z1P9S&zS~2b+o%^Ef>%_X{dhhW}1*xhLR9LhmZSRJu2zm0wTj~Uu^)=lm z-8wR6WSoXGNWIxxY09ym-cSpU^%)iH!{;x0w}pSLi-Xh(>p?R9fmIXpFk(s6nsiJ{ z;=z~_04>5-oU`6)C)p2CF!AzyzVF!Pf-1H(;drY#=1dao?Y@7iRV4zo+-DqEHBI`$ z@w2n2MQHy_$HyXeGh1di0I#xfO$gS2ay=wCMxy-_4rdlVAqv*s1^#o}-}Qblx)L9x zD`Xm~m9X<_(=8ruZ^#V$<-Q+}Ar&(*K{z`EGo|(J+|#mmBIR{sxjeC^BzT?{hXQiC z3f^TLr7>Fi?AM364PE-uvO|HLxf1{OC+(49>xuFp?u_U&w7roVws_9%I*+3DB9EX3 zq9LV%hr3{eE@IaCW^o?1O2Hh{!S__;4}&4^w6JRSPStP+OtrW;m=prU#g`pYbNCfc z7+YE9ymNZXjR|JXIy5Lcn0XST$2~bm(+R!VbS||>A%S3#!cuAXN6EY6y1~2aWQLXz zHb+usBv;M-z8ftDxdA-xVY_T|rLK$q<(>v6UO$9qkzJ4VJ*n<4iM<>&c0{@pv-wh! z7~Q+V=tMVI4i?{xx?xWDo}^D9DSb7lBhK7N{XUld)(OT<7 zK`1O~8njTT1o^KT7KLw~Ulj;P8O+kiPq4^7t7-J`oM7BIwFzyDtBypU@p6^wyeE?QtZg^m!d~YW|5u z&op|2<2ug)Lxx$Mr%p9fPWS&msN_+Yc0&((UpLAli0l#QlJZCOxZDqX zmHzeDxAvK;9v+jHxWG((STh*R8@WC$+7s$?N?yzOHBXolwV)5pa|c`Ixb%xun9yL% z6T{3tEqmN#HE!NsXBFQrtJo;Rrv7Q!qkY)B=v|s>)B{_9sAp#SLLTTmjzB;?8Ckx| z_Ng?CJ;)e_vj;mQdcvvf_Kju9zDxFKq0oC(x9!BSM6T&x;GLJ1Cdj}*MBh!D6#F#HNK20v0IpO;KR&zEke3>tNI4&IdC~TTOq~f@vRrN2dYB8=l zxD$*-Ds$ zH^P3lkZ>(E@oT>2ov>|k%n+`k>1Ly!908s^p#Ak_zZIQ?3)Q?%f|?0IeN)D4i^4jJ z>#>HD^-OWV#bi51C|kyaXy&L$gJ#Ud6rF?PZ=;hIlZrKsz2pIRMDuNwak)Fa10(WF zM`x>qI3~LEZ=c~Jfd9gsd?+p4{qHm5?tLD2zxBEF9&opKWO!ppN}4<={1m!l+A`XztQ!OB71OP;pWB<<4!?JipZ4=>Cs3` z&*n=udJYTjsIc6_34)ft)PA3HaUK=hI5qM$hE~B#Ztmv-TQ#bu+!rGFg49QWw#*Os zk*U|4Jz*?!S-UqhI@9%$aUFIKDik3iK)Fw%GL^W@7vv^)OT}&V3j_vZ&1y@~Hq^3j zh&N8UI!k9uiQk?qoDxU?P9bv!i&PkAQz-P9b^1U1k{;JjcK)DbBm%4KlpMJryQ*(D zN#?HC^CC$QY;|c?#EXz3c1)-oPl~%VG&0NYx1Sdmu{u; zrK_QWaV;E(lp6<@R$woM* z5kmjmMmH&MN2}tIAqR$wipHUdZ4it~BI)+QmZHNO1%!20=TLXC7VM>`uxh23L;<_svPBQDf7Z1uHf< z@YyyvaX8z&j?UUAxzUAq^#)a3X%xZjHJ_-uZ2j&aj>rTJu4(^xQ9|$Nu|ilie<4CyMZxvw1=4 zwncdOJ>ht=-Rwy#P}Ho6@#_)d$j7KLh5`uzd5ploywL}KV+*cPFzw*S0 z432+1jv_;YIJ3z3DIXTKn#j28d_~5uBt=H}#E1-3UnTl(lPkSpuz6kSj#KIE8nWb9 
z(z#zr$ddPZSyDdLk|hiH7L&#Y^3{xwc}r-u()h4gQuJTNlFImK5`khaescV*ZhT0& z?=U{dZ0L=PusCjtrfh|l@o^&lec2_IEt?o`86RarEb4-wV7hO@_#nYrQF>vWANZtR zWLVS{$zfXPH?wAZG$>za24YtF5PB)?321i z(3u$@m>o0)=3B-`(rVSGH`k33iOO@mAlR-${ZklfHb1{fQ6DETAb zB29fSQAWnc{Z`gF0nK`>hK6@7PN(G{U(NVHPxZ#fDL)r7KF9(|OiL5|T5JouJ8_C? zf^VIf;BeLisWKoGUF(n3I#ve_|S+n@{Wz zEGT>7CeL0F(Q7%0T+-~=R?&RmV3Vz)@YM`i!V-t{Vt$%$4>|@!Hg%Yd4&xoM}6EZ?W(>y_MM@ znU~yDnT+l878!PL_HON-4zr1a(J(t{(LJK9;-nczX64E_ib7*oz^^GY_^nNncLRY! zXj`^XSkVBOvYDdd0D`liUOut6y{V;R6#`TUz&eajoXukBOvJLv8Ja((Qf`^ZJXvipKm|g~P9n>zliZzw?=K=bK{RW4 z&J(jHtI|wb%T#AEs-0vPPn$cq;v6d+I&4i_Met&wFl(0e%>f+Gn$>gBfjX?%!bd%> z_f1#NEpaNY94U?1X-+poHJAM?VWc`Euwd*`HfgJug1Ctd|F&`Y-p_!}&_Km%rhl~^ zX-=Ynmc?jhHp4ZeQi!L5M|@}4?Td8o)0mY-ev1($*OBGP*V$+9yFkRif;qK<$Hojx z{K5`gCo1rPBn(S#`E|j@v0>?G6>}RFsg=LFVd+4k5K;Q=g-ZYM#q=o0T>G=$;Owxs zU!>DmcO? z!m==Rka}r9^K((&Gk*l=Md{p&68hBrUZ48SubDn|r&b*Uuq9;56I{kCPbd9cXg10`6Z)((L40%3GTKqxfbMwJRKoVX2K0bcy-+nP z8_abp8R-^>N=v({mwxPjli$8-Q=#b^l#kXZy2?qsC27dZPEy)DnNq-lR6Ki7p_Y|* zZ3^|hyfB|GA@wAmOz26JcWP4P(~Z=mpg9q0?9JAE2%*Hvb6kPuO#Vd5s(e!`Q8)JN!)(VPrD%Yt^7Tv927Ux_gyxkqVw~Hfnp(?pL^e@+6 zT0feZ3~g5g3x@uk?ohR7y5LQX=Lv9epN)~bIZVqDPPKhgAYN*w>%nm;!rf+@IF{t< z%4A_Z*2c37(mEw`ZDmg=JV$1cDVFd|ci?s6=@zEXZ@SGVH(}C$PGZLG^hlC9eSI+d zwxR5MECR!cS@BWVH1;f&cXDD?Ou!CbY4u9$pdCK7hRJ$>pq}n0dBH#~9D2H(=ccww zfw#C<({mk3Gg00WFzM$q_e#MxoiWyvoqlYKz-pLwEK3p1h)?B7hZJnp37Em`+ksDM zoM%9(%AqI6rCo_HL3De&ZI=mWhK(;NUTabM$nPj)MHCQQhMn2gT9rmc zrmf=;c0vPm1zp#4yFBk#at2xEms>3yYnYvMDw_^di7J`LNWwd1D2z|}da{(dwUTYwK_M=v(i>5K{&#VBab(O%3wY};VTwkV!! 
znq+UgCh3T@U6V|$&?FrZduich2H+2s?)umC#(nRI_bAY^jq+V_ZP>_uZtY=+2camH zzW%R7U=P70onG0x9Kr45Ji~d(r1*PIKr6>=piS8byfjY6NxZT+dL=4|VdyL#Opta| zrgP6os9*0v{pz7!>9vi=Pya$K2K+Yt>qM+zEpIUWt88T={VS6BlK7^7IW?=Lf3>Mu z(%Yh)^WsJ>&63dc+ytE$qx%$+E>0c68NOu$BZg;&B7x_Y`{^c|;r`E}`JhT2q z>Bm9;dcf;n6@5P@`d3RKa{8C94in>P>0jKnW-6&zjob<7*%6VB>0hz7#syr~k*?aZN$*s<)Xp+q%d&Jbr>nTRGo4J_LESsK&8tZE zpj)*mW$ANO$~4E1fll@dWMV{wQ37Hp_3KmVl{o|_H5~Bee z_+gDT#)Cg!1(4kg6 z)9GKe@uzi`tgR?{76q(;qx0Xn4+;{mnkpoX#p@=Wn$YYSG$E%Xtf2|XpwazDD>R`c z<7e2(;YwBL3DtxGtvNv>_41uVP8niiI&~ox8O%O%T}YTX2VF=`|J*vK3oVYvy3o?N zxqpSo)h8}j4>Brq~c=!w5zzWhU*u;({RPQl5GxV9X8HU!+kbJ#(mSzv|}FH za_i*d2^H6n{CI1)662A}!)^6SeO*(-b;Z}#aBunE88zHzwrjX*Z7mHq5h;lbm6nDpB)T2h!@eT|@157#86cX%e)Bp?*Rb_3>&0HR=7*3tw*fmuu;@^m z4$Jn4^AQeqZmiaAF)ys#SgmPf7`r0d&dAZGRazx-w88FD>mLoU<*xG`{O7!SbuMZ% zzX@xJOWWjLv-=k1f$w*o#@MYVwAijENf4aYj_Z+g*oj?RxgPm(rtAH<9@}?q!!Yz) zwzy(RpYxXJWO4>}f*+|Na8;DpX)Rnsq-!b&#(TS0M z--)ZPD!l}Sw>(LhEM`2Vf(>fS97%h-PY9*v9VGhaKD`2)#!U zZaM@mJX>x!me(9vcN%&};DtwO1DT{7o%VL0;&?-hRCg8U0DT|OZ*~kqB(r1~NCkgG zGHpRnU+*c$PCN8FZM2*%-McQcGjz_$a-7@HIVTTf6!N0Yv38M#3SrqP#edzVw{jU_ zQ@iqLZiW(km0r6h+@WJ7I`^oxk_xI}m%ub)5+!;-ExgI^gv+-Wq!anSAXzp!`js`@NVKcyV%cYiX*9;9@Ufz`B0VX~!7Mn&4R$zi(H`i8bV7?C#V z%t48vmD*%nI*c5qt^I1JkE7jy73L?QO`>`5rV_vL`_&1BQ)z2E_p6x$OOmNITIRVe z*iTwy6zwMkU)tm;bF{ksY6~xLerk=CRy+BYC%xJ(IJ1WxkYKu zrq}$XTDK9?wDR0x7R{b6v1JpxH%H31;L}}g%BJyFh7jqRTsm!T+n`|rz7WS>8Yk3^ zmUco%yIhJ+b&-EuGwmuB;RejFD)~cXnj76RT^LJU=l)T^k)@MNgN_Hgp}1E=H@L&Y z$znscbLdCxYw}7mo*syVa4M4%#VAe^`921LiJ1TcItQb&UXv|H7qp@vG3NtL%Q7eJ z5@>CsPgN7KDMD1Cy*kRK&BTa{2j&)K%nKdo>}U{2tW8^4V3rQ_*T7kn&_h2)F3w<*HlECN zOxgDs#z~6AukxE|i#8x7sH_VDDw9b;1*z0!i>h>Lpk!TpQcw!5nG|%zphAtxT+dBR z#CM$}dFQSuF$4|Cq;zAjnoN}mwR*0QX{%d7BkrWZ1-v&bd)c#i{YVZ8DOB( z%uU-$MggSZV%si;?W)ia*V<)ivRmAcLyFWh!_9OOEWRXo1|255?rDu8{D`&b8@5g` zLurLoCr0nJL^DNw+=xQY&*Ibyqs-aQvPr=5|A8~?z!2%deNH3Bu9;ZYeJgZlJl^KQ zE?q!o7&0wP*@d!bn`j^r+p+?Uwwr?D#H|o+0De2gMe*>e4gxhcj0)RMaZ$L8)NiLG 
zLvn0_?0k*K#v0ox&vII4O)Sp$dbQhl>D*3vmN(s*e7PGmN#H^|{1F|npOP8PYzi%C z+}YAeKdq(&5e`cgqj!d`S=2P3CY0M&3j9phk;#~ZmW*gzRTO$Hw7E%V=h}QHlzp#1 z7Q&9`M+dm=Q@M$PI~reCs{+NDt!8seILk&5F0j>olb!LD&(g4sR*h7R#7!$*LTBpx zg%0!eI7~GPp^eh1yJ2C{B+S>9fe&ujUd3>deymj3keS$@#$1i8)hO=4m6kYFcKw#M zYKy2^DsRZ-LMN;pRgzXp8kdvlo={Vk!~d+J)n$l>cWY=!CbN+cp4dlc6rL9x%x#1% zX1|weNzYc|#l~D_V|Ye!c~)tO|4>p=e#BS|>V9|_nyIr*vAS3wHj@IepJLbNI)05g z|FvqwiNn$_|1CMl&ngFDIOie<4O~!I7&S&Bx&75|aNkFKDLewN1$-<9Ltbk!v<>0k zRWTt4uh;a#OFLwz>cj*4f$Ch_5`(l!!tBqm08FGQG4N_4&6KZBndY+QKlj$mrqXLS zX0n@|4O?B`ZfZU&oqJa4eH$~EbYxC;VabI!Ckl5HW#0?8$9!KsD~eUXeXOh~@3QsR zT&Ja*%B>G@9Qn$p*R7f~2RqeV$jx=sey7!L#I<|l+7GW@TRR*esG-Wota3fBOnP14 z^+T&yHVYQ>|5sJ}9ag&**G8QC+7GT?d(m}WQ{}f?at9fdqaP|ZAfTPF6854@eCC=kLhG)(j!u{KQL#U1!|hU z2lZuj^v}*nxSf{)e+T(-)C7N&c`3evTz4&0)o2y9qjmNOn9W6GzD2J{%|&U3{27ZM zbBdHqZfzL62Tg)dWU1f17Uc@JI5Hytf>JBxxyviGpw~f*dG)cW>Q(qfC%KiODkzou zQq7(8LSOJmny_-&B_rhjpS^d1kFzZE{*%cx$C>s?n>J}n+ev_)pp=8)a!`1XgC_(; zkpG?pb=S3cLh;>qS6^q)s?n_)6nUcCU7{9^E4#+s?iv-Bh^$7e5)?IR)#xHbMGaat zNLSzQ@4D~jFq271yDa?sd6AEOGBeM8x~}WK&i8fC#&6*eN!}GG^FCW+uUs75WTe3- zm#QESH#|k(JRiPLmB0#UtIA9}a~QPHuQa1HdG&tv$XDudnjS6cH-$bW#Ys^6wo(m@ggAcKCA0Dde!?H?o{nJy7&HX|qwnO!O z@)iu6f#%F=wGte%4>E`?C!R`njK^$kr45_3i}o=+(;T(Es!%X2uX$`@DDzB_p+UC^ z?Rcnz$6!tT;~6hZHAx4MDtO2>Bq^UTMB?R?@eV>{1WDBHPnezx;WGj-*aew5gb zO?qCo<4|yjZ0DH^Wjh&o)Uust9wOU$W`^y&&$FG+_C3yaEP8O*u$>DY3%2vg!;I}* zuu!)1<@wpp1&=n{u}RO%b{q;0k?mZtP_~nSM=jgA;1Jo)1sS$8kTK6RL}oo6kS?U0r=f<+HA)rJulmFC^$s6bKyeSP6i&e zZ0Ev5WIGpT*v{X3w)46Z9%nm;7u&h$v0yu&KFrw8MGIv+U!0%qT=ZzO9h>yLY{#MC z5ZTT}3uQYQc+|3;iw=?PT$Euuf9KiGi%xl*?Hpcg=i_2ElyLY{#MC5ZTUi7s_@r@Tg@w&pkx8^V|&E zxxurY&t3jF+c~`0&gG8<+xd^fjO|>$P`2~g`Pt6pk2c$}Nzco6910GR?OeW4wv&NJ zE!(;L5ZTV<8MgCo&vrigC$8Au&BdNoR-m0Cd#baQ96aZ9e|L24my1VO%}XmE!Obe^ z+4Bj?R8FdeJH}2eFaGAtYw$TERZg{=Y|7sR z@8OrNjkk2FQI0weTN*|?b%LA&!{sJbg0u^SIF?SCARS58u~&}Z<{d~lDozE)8*{MFJ+w}%E*@ak{*b*}h`)lV*bj%ol_hk?#O&%M 
zmC188bo}{$PDQr`Kpt*;DKWd(LTR%SG|1&ECaEkz7XTmw)Uv|XNG@{}_x*4SQPyf|I569#s81+f-@fxRyAQYV)V>+(5QP*U|~H(H6LFn0|a41e}@Cs&YCPO=3nA_xZ>xAiWga9w^|Z%15y zY2PweeraY|Z&z6E&GUVM|1T-?Pc#2-cLlL>XHl4`xzw$~Os%C3m2su+RBH)+Y)weL z+6GL6mau%DSu3(6RAJd)VI45MRBWk)W&&YOtW1i6PjvMay%8ksJD~QJDq5A%@lI

klQuD3klS*=7rL(G}P%(A6 zT6D>Qs$M(elA30tnv_~)W`~7mvy)BO1LA=!Lx1n<+XE)~Qqzy!HO}c|Yljl0_m zjO4T z@KQRU2xCugGt^8iOuK5>4Agwo`x9=pccy5IY-%nL;0}-D0dT1&_*_nRQUFL5Ws|{Q zav!M71|1%uJ1oLuU$QMc_9|>7gLd@ems379MS`!1nW8BlvMB7@2D_h7TWof5%uw_M zbONn5Jr$VQ$IVwQZfaQ6sET=ddvJ-pBVrALe{Kt#t_~Y{qh(QxZu>4RD;0x(KAxG1 zIIUb<+EdF_i*xxV12r9)A+5UELGY>Q6R%*_)|RGLd0)GmUno*}H8p}&Ic(G{&^o-r z96YK?r($M!P{MmZ{)QlI3TB{JuDlSl6Z3Jk@ytU*Pb&pftD%Pl8+-ukfExQdJKej~ zzU$PG`pFg4r!<{ona(4kUaYLK3f1)$gSVO`&!z2psku}DFXi+4T2}WKN<~o0AK*uB z6RBZFgDT5s^Pa0)=eV`Ms|uHb@!Mce zyOv2AWLxHzh97i6Mv`FFb>QvVREp^{^U3Nu{JP{~a7n3btaV6BbLGs1V_i+m7K01=B8rcF^=52t^mP9YS zhMKm%zVX9SMJjLS5CyC;Aqsa%OO`0ue6{R2!O_y-Ok>BMDxj#j+*sQ~T7a)PDW;Uj z#nH~_sjn`qD;p<9B?8A`Oc=G?I2m-}D-g!glrWGNMi?kf2P?~RM>&zUb$9!s=qIl( z1m*7YicK)a4sE;USnG0yCY!z1baIwAh41c{&INTF z)E%}3^%lz4r&5+i)o3-%=p5~WxvG(hWF9~@>Z;-mH%6!bWnop>@b%x2ztx(W2`lMB z|MEaa7c!kVom^HGIt`+z(uF>yhFw*<5CZ&dhB8F@+r`36mN%O$|AUMyZ!W#PvGle^ zC(V;h57uLe4$cyZ9$g)_FDoW0liEht{pm8w)F&EMb1j`VOPIe6RSMbLVMYXk2a_US zXy|C*VusQZ6Q5j=6nw}@(uR_*^J?pDmhm8cqi}9Xm3v`N!zRXSTWdTXhGGJwpZB=%KQ+0`V3e~73H*YlcEzW1)^ z%D*b~bfjP16>a~k!iL4k7iusOed4bYkIf6s0}pwc!T>Xcz3MJ_VRmcduI|28u-R;u z6jGkb#NB;uy?K`BIB;^?k^R`;9sDz0^`j%l|LMJ+D0nRv0y+oRRoNO%1m;5+X7J_q zQmZ)3*2aAVinx)?iU(_FRQEl**!o72dV26G6pHcrz^C*4s{wbu_bZLwLk+{O9_=ay z`g4}4@54u*z?rpM_jC9&)Go7WcG%~w%(x|?a>)+GX*J=Xi+~PYMrAL^( z>gd8CeC2Br2ygozhOmQl3g>4o2+pnU0_}%`b9D7dxL(;O?72AbAz zg})={Kw$oAJurXCfw`{{dhNWll*?4iMcX06892^SrxT%U!v<45hx~*uJt|1<@2@zmzysjIvc+Tqn2Ki}R zGnrK_;jJ@>U3YBc9j-IAG@u+t1M537QJ1IXV#UTQcr>^QH|LHFxa}6 zD<#)MIPJKF;r-=G(gnxZzg(r1>fPa}eSjQ3Ou;==UFj>Sj-?wQ z!3g%Q4{W1hnIf!anaOb}giVg;q`PHC)BD9QxPoYXMr_)Bhi@R3Y&~f6yan7Vbb=cw&4F7LI|T zXyi?WuC+pv`R2%7(ED_ppmv};`uo=x)|PQk^~H70b@!aCWHx8;?lkQQdMuD8Ny94+ibue{?7GYx8J z2}FKy#1PGVuFw@IIxSVb*H~9L8-4!`g%vLm&ISB`=XwhGdBZ_r^k5M>aa*D1g1X2& zE{jZW8C?#2dFaUBS|6wDeu44 zS-?!1tMLGC1u~GW%D=!HN)g20-BLOo;$|nGju$chi<{PLcVpX~-tZ-vz!)uG)Duhk z^v3ZX$K>Q#xuqMFtvsz^k9aqqKiQ4*>a>PCdbp)9AKVr#c~fEeMOl-#?yh{~aCpk2 
zgTwdTgbY395yD~FayGcLyKI*zh(ks#ea)i0aJ*4U({+e4uBt%)4!iI=wr-;Z{&iX3 zdix=63_)YX*%^-UzM>kz9oA$_O`5LR#y8q_q|uBCjTV*QGk-!6<=`Q9HQ7>uyCXqk zjdBH$T_PvHwbLHBI1c|BBQA-3b9RpgDPFNb5O#Q{-u20q4Fh)nhFJ#fYT4ftRJiisZ8rqHd8t5K8pf7~YtEO@ zlt(&ju|LCENqRi^%aafTPVpubcgujONH;=b@6DIvoAmNq8Umo6cPTEV)_=U%GP6)g z`te#CkF4kq1g`m?@#gLm-D7{7N6ZN#yj3f~K}K0IjldM1T5%-8AKdK4k-6DYeaAc5 zz;orx?Lr>)-J0+p+u`0(`#eA zR^+wTQn@*Jedbr`Aa>>4T7HEEHU}Sv)d){&o2X4{du=1X#`=y2XdLZpIp3)H@l)GN#K{vCt{^!@v|0Bz%ci`Uy# zo`m9XU*^w_)?U}qV(L&-e0yO_*>X51QGvjiVkQ1+9Kst_7jVC$(WUme$ZZ|4n)i^@ zzU@mg%oJgdCpx`BM^|W~RH7;DZ*|B#@@nNkT5Kc!>AhGmTuZJid`yZ=M|C$tNBw7r$3p)DV!ZrrH``+=+m_eo?&yet!ltr`AJ}lK z)c?e5h5V7%%A}83yB3=YKR1pzF9aX3brXxScl6ev0`0QR9e!e~n4pxs;-RL6%gR=_ zW^s3I`y(H|0*LNS=T5OVWtZUVTpE7!2Ml{uFl)n-KkMGaN#opV3N7L&UyQJ00QrUO zi+$0$>k1|O7EyRz;fYB+K~|?pym}E0gkEa#I1e+TFOK8N;%Sa!_BjhOWz1;MBm2iB zoUuoTdZ9WzH8>T)!5vKLEx@V9EPGl(82*)!l~_T;xDJVvM7{4StccEgXQ4Y$5Fe>E zL_uzG&$H^z_)8X$K7=_(cU}+g=rG5S+X;v=WD=bQ2^~aNUSH_B ztS$^sN;z)^@h-Wd)wq=DO7r7iuw3_&1w-+-w_`D#^xp!aIV22l<<`O?aU7XKPU)2G z-{O@{a>82`wC21(kDT%*+gj%+bEBq#Vavc=pq;#WcCp+3F1{z-mUXGPH zAGvFz3puPX`OOV*@X0dD>1G^uS|6IJUBbuH?!H6O58H#k4u2{Le*D($0XIsQZ$O2- z{XLNmB;frIUPEi%w*v}j`!FQi(nA@im8H$JDw^rEU`O8u1E@UR*<3W&s&o(-LYk^E ztm(Y_OEx**Qt*Xu|*pOiBZaGl7MW)wsDDI+m`_-R$^x@OjAQ#b|h}q>25n@ zB2!I%M(;HT98ZcvMUDXLB=~F7NiNDoGAtU(w=_Y_apr%b<9I8lIYZtIGNZgBXF87jv>o~yV44ou%nkwG~d+QJ+eI{%AQo_6knydemuh6jV-Zn~2t z@uXP6Y=ymisP|i@AlC`i)`(T(PiRj!f4*&X^t`^s9gBKN2l-a?gq?-s8YiL`>`X#7 zE33jYOHXSs)tAjR6Mb?gn)?BxHN&vS#!SHUW?jN5QtCI4idYm`OUah!qI=v4(_+e_ zW(%}=R&k5eaJ3t=ucZpA2iliMrEJP?>{6v}_iu73?ows4hU3;E<~e)fj^uIn*mfO_ ztDn@Zb9d6u#8PwRNL5r2^!4URCfUj+w53d1Rd3PnviE6XE=b(rU%c7Jj? z{n5bz5el1HsPE@;R9VD^IkTpGeM5*g(?TR;Ey^?@ad;XMcJLFjWo$W-n>N9|vL>Ei zx+xFZ#C59BHEIjKMz8)}UjFiY-?3OVY4bPcl2fGK;B$asYDM(E)rHm7M;LXIlWbMZGl^SyAmg<^SAWE4bFBYAN zk64mC!vDd^NfKtAe2jDT+=5ViGOiHdR02tRm3)Oe!^DxYVFW)!QSesqj~8Sy$kA7~ z(6u=wK)8gy<>8~gnu7wZp_Ds`w`7pL4fJkmBD24G${nRAXS^S8wFOK>PyImR_BaB! 
z#{|1pBx8FLi8Cea@Od)&;0H(^Ea1*JkA`VTq%n;7_f3h$O)}*_+g`&hGUU}kb22SE ztzpLZr~Tr>^%)VhKb8<;0IV731KGEUh^lmQxL$>I92A@Z1DB zr!C2jM-%v(Nsv9h0kMK*EX25$sT_iSxclpPMLN7Q#=tLXF{m@Xp-D`H3@R@ncf>gg zX&k{+2?7Oj-LS{KfZX8)jqwLJQDXp#D1`g|lVOiZoiT1n(v>YlZO)j}>v&@ni*WNB zSRO99x-U;|NhZrFcb4-tcShT9Dr~8;(g*09J&^>{jsFdDsvDnHs;xV3jh2?N($n~~ z{G)wYD_v%l?7HySI20xuJ&jEpOdNw07!$8dWl2PAdS&#c4;Gd^^3$BTt-a@{`KRgJ zxC@@B+_=#zK2&Ia$?W1J>RQ(mlR)FXeVBE}J~C!qSJXaS=zHXWq|o%yb%FE+e~dzm z<)ENcmak(I`Me7R(@*~uFfrOA?y%~LF8WBJ`KgkjZs$qpc7*@%8+P3Z!Pkv13`n!I zV}yZlFI85mF}{JGXvtM8x{o=SjUwRIr@-T@-B}CTDi%mMNZ5Y=t#1s%;~z25 zW&;U=ZCP51Bo)lp*T2{$oyPz=w8ZpcNI{uSQjHS@7|q!03bhLGD_mlUv=N6hsco&X zr_3WuoV~Zm1};m1r#yvmYiT9OKMqjDimk;OXRNDO@i$9CjKANsn_1(8%DM7g%)kpd zfI+~(>%r2C+k%^1My-~Lk%fUEGwZIV7x38`ATmWXRi`jD2X8b1 zmlwcOiE{%`D7Fb!{7BG{?$(~L%iidgCQ;eA0_8a!ElCIBnTFaV?$IvFKh zFl}T?ywE#yfeBCfJK{GrAA#tb%yMhvtnXf`oZhz=^-8S4DWbrMxX7nm0j&XF&G|_oq|F*q8gk47@*ByqzVkpoxiEHq0|$7c%-nlaYHoDqr3qoKtdsB zh0*hxt|_&@(%4V9n_eyEh>K7$N2P%7HZA}aivls2<49+XvGWQ8+QNkHQ%zO-!0q(RmPK33RFKJszrEM5_v_|F{VFI=`EhGY+C1 za^$vaP7;tU56JV2ak6@B`-%a}Y2+NLo> zVNK|3BfHTtvOw9Fe~s#JqE{UzwVOFK(A%6lOH6D_!?zaqwM5sxr6Y|l8g*Ja@lbW( z#-Dqx)w#uh>gwE5C7xl9c8l(0LKvsI*a@<4Xifs5<#anw0(Nk!cn>X!e*Vtl%Cenq zZ~_uALAI`jl?h1*uKExo1+2vEr9?>%vTPEcyJ_XaqkaS{(XkZ}QMAAgjMKETUm z1;La}0BNK za5#x;u84Y86$>r>-mOsnx55c*^pE{p;Y<*#MRBM=q8OjTs4dc^>q!cEq=`iG$)3Jj zs-xf26H6?fD#KXcJCISVZjIz3mg(Mi~%>T#Yz?bqXh(iEjNk5fDcSqdiq*Is!!*=D{`+D>*0R{9BwNb!(6|&>H-X z(^yWvu^)|)Z3LZHr94DXX>m3akP`*S%c`>#A`x_xvUr0dhu@p=vfn$3G}&4FLrHAL z4+)7&)s;N@4L)toJPX1idRh4HCG8-Z4=xEmb1%&8$Y2W4Y;7D|?%veqQwpP4&oh*} zXw4LB(E)-E0;a}~V7lMuPX)iK_t3W?j&EmEaNg{x<@isSSoot)6pktPYnQVX*zKuf zMrQ^ zUzBTRqj$|vnxUR+2B6jE$vDrNGWmQ7?suA+r> zs!7cCc@8;l%6_^(RKx0^E9VpT8cyNG-+{*K6l16f3bHk|x7@jvy*}3Z9i3Y_WihUG z^OTQC{hNq>aZ6!q`3Gz;=|#85zY4UiHV9<1yHUqrck5d|jWmj5uwAv2YN`SV+SLS; z>K_kFFlFihw9}_Q=_Ix|55+8;U?x4m2sf5kx(tG8LmK)V3>g84?q4>C}UYlQq_wId_sw5VO0zI zl3H>`BTmabhlR5KAST2sGL7P(NcdlBG=~ts8x!L94xSLd>k09D(T?TC)zPNY7k5Ns 
zw-%NrfINTa5hn`A?%pel!2$zbQ|C-xnWT9sT)jg(ZLFLen2^pVvR@`I^->U^|kPobojn z%aC1FPY_13tEyue-o^=^I8Q7?T7)yH4nxoG90|sAO9kr&pYZKt!-ev`lf(V|uYN zn{be|CV4k$GxSl{OnyV<(3y0n)o2i{y1j68*@ZCWd2;7>oPTfy^L0MKSt&GRV^}*D zG`-jzAe60X0iikVoYM$jVsnw~47D3Vo1mTLN6IkBw3Bmc3{^=3e!E@klXFVWO*K?n zKMf7u@)Bek^8R;tUvG^E_LdpC&Oe(jCy%qqy*26HvVgHOD%2bLc4pmy_p!>#Kn=Gw zW0f)m(`e-&$W9@h`hPMA;eDAGAW7|H{7)}dAqDC;D0|h671OP)3vXAGreBMBn<3_B z)=d;Uqa@nR&SDFU#2zVU7Rv%i6Y`~Q%BnCM%^RLN6;+bqDfxO+Iowq)<;q(cUb5Pm zx|WP?wkRoDF4$aqSm2d&ZP8h2rV^X7kLVX2gM6cSgT4D3*rHE7Ky0G#;WO4!TXfYc zGcKq%SLgYcK5qf<3qD`yX*aD&5bBIx{`o>**|bGIOR~#SV&7_=+Tr*Z)#|a7CsBJ^Z;zC-ycAbo%-C4rP>&C3lgzdP)6-jCAUt0VFljUA zcgT*oTfSWIlT;WKdt+<(>fM;RHh`498P3Ab2xHRTGzXt9Wpi*f$Y<>!n^CJPGcC88 zA;tVDWf4eoChtd9tgGbB?st{E-L~-L>39Ov9^7nnP5!M_w3e45Zs-n__`1-p@ZM+| za+dqV`~yIJs4OC6OQ=4&<*MKs*%_Bd5F2$_D-ds{I4sp8|fv$N;TC5KB0qR7* z(e~&>RZYmZ=(}=a3NH@sAa#xRYhNG35fgWy@H@H!vr{0k!4h{*gvD=ii4r38~9*HVnF0Lt~GeaS= zGGV@Henq!Z0G&E%B*a-dU@Av*bq##A93+F%Vk<5>XL|U7s@@w?eU3{xCJyAA*jr2- znw1QdR-Y}Fcjxb&qk%(z&d=d#tkxfC87T6(Qztx0TtgOkI%LU?fZ_fmDmibQbI}VL zt)&&g4~>QiKKX*~E*Z=+SxD3H{UA5olx-sn8&Qr{yK|L?Z@w@!(B~im~n>k5kM=tCo)YqIT&vWwvG+DQc!R z4U#Oo0jox@Qlmoe4r6EXlk|01CS7x>?R<^t!(y_$TbZiU83>EzV@+e0sFOKWonGWL zlj$lD!)M+5=3a|6D0|1Jw~>;j4iS;!l8T5>S9BY06bcM&Gq0JXDYTr&?)m0OAHYeoam`>qle+`)DYdc~7y+JO@_Q(Xk+T856s~0^cwN zah@b$#$0+*oHFx;0BpWIrlEXNzVGTtC-jXHzzHhO_V zz>b4E#b8#nfnwm$Vn!Qp>tImwqP^mUmW82Va+=C`P$wlDJ-)7dLZ`6U3|gU2$+mE{ zOf$MtoeE2zMN=?np^SJEs5QGhO%EGYi3DDdIUXo3JVz~YG~m>dIA=myiR623E(uK6 zM0d~ZD4iLVzgAe!!Ickw%|d=ZA1`i{pfa`vf3ipm)@a5dR6)HiIfb)-TuL^7uM&- zmPcRydZGK7gUeOaICnlLmi9FCo{aL_8b%aaafP0+*Y!nO`APXwI+4i1rsY=Plj=d} zCi-G*uk!@*YCHQ;b!Sb^qk=wz_G0=|Io!Cvy+NEo8$~s%TZ12`jdpe^p~oC7uHx`O zDv(XAp{<(R91TF(ED8%_dZrqHTbVK7IUx(+^VM>b#X;ke#NO_TgI7A+qCk>^0m^`& zWO;TjsYXz-%)4v|OWrpD!Va06=USmnsRWjAz|+)Ob_ylckCRH^{3wbZ#sP!0b+c64ux~$$r97CcIlqKQ!TeG7cDt(C^2k*3K<%SMjfOXQdmdNQTgt*eM;QlcSrz zRX74(@`Z1i(yO|K?Z!x0HN`qRbH%Rc*l!mWmyMh7CGK4dW0~iM&*r(&W=k1HTBxwv zTVioQGlaY@>V*(MREXP`CUYjop2aBsOjWZj=H~eHy*o$*~_ZlVR 
zmAfycY-<<`K3V@gI^`v~Ck+LUaZ_VXYT^Icl&pCCmTHvbwrVaEcPjnJ#*scKV zqvo+-i~D=tb}KzuTR>GvRG^7nM?Keva(%5^8ZIqTo=7qx_-GUQS#$86w)6Oky%D|U zyM;ANsvTBuh`xVK``RNjSCimJfyT@WiMUOxq(txAhuP8Y>yk2eg!5$9t>|=CNqx!A zd@$up?C$i!7xeK>cdHfpzzSr%qVz81!!5G zkZk%2Kol@sX|U~H${QQwZ*O2>e$md|e^~UoyKncCc1eDfGBg@3Gda1e`n%*fNO2`r zkvpCCnMg-cU|Wf}{J`{#%1TBv;NhHdX=d)YM6UGw zmVbQxMevb2fp~EV#3cV_(#o`Bq?DpP)e1+ESB6WZ#_*E=#2B{N2`pQyJQ&GmQrS45 z6>yboMqM>q3LA>@>3=Q3%Ek_fF~=c!ya-ec_!cYB%L?3=lgNDw#RjZQ7r(@&k}ggW zbae_KQ)CNLFu)p5l8%Rl!wQlbTb6arOAnjb(RNB1zeUZ|Z+#Q&obvCEhcwMyLhx-@ zG1-c(83^23Y$|1ykF4CwcI2JitQ+PO-3Jkhqi&R=`3Z@?X;^nxZsLUOH}-xmbPy&sbl zg2!bhmSaQEsSHS;Ok7A=nQ`%Ocvi6_AdHEF`7x{B+#dfFj=!t9Q3z0BEIHwB zdzWwEKf(R&nv0oDraDgsNV$&SL$)t1l~+T{g35)PG!Pb5nIh-Lped(hOYm0DHyBox zlyrlOd%;r8Y<|;AQ3BlXwG)#kCo{3&r#2-jZCDW2B!(4Nr|-%#^<8$I+fiN#S!JQ- z!p#~SjWeE{ozAs2&8&buc5uH51=w8?tD?)NgJE#u6; zyOaFSiDgpT>=e4k8!tjM`0;;k7@Ad0NKj6M^S6PCPmU0nK+7II?IQ=(&YnVT25reL zw18m7I=SDEr^#`Q2>~@A)t%?Cwi~1M3R*g%E`ilopiC*%$Iad3zWi`GxwCh>b}LQI zJtv%sML=#o=(j^V4)sR-LcQXp@9NF_B9d%d9Z#~3-Db^^nJEZpvdaA2)Wnvp@*JII zuCTHl-GO*4hN7#D?c9uVYF#F{6~u%jwqV=GvQZ~z#BmpJ@>`?i&eDIp&cUffa<`vn zY@@7mKrx7Hc!>h7Gd)mdK0g*y>+_T7vEYebpGtUTrH1g&GJBRPv21vCa@*4L4NybC zcv19%Q_+(K1$&hFUDA-ixeSZ2HKhE=Y8ebW_3@^+aggp7{?V6Mu*l9-NTba1imcz=y(F2A zmz|@Z1KH(m;R@cYWD8~L=iy~0{y`k9B}vfi@2*YIM5#BQjT0&2w}~lWWgOUNaFqG$ zikIii-^c~GU-;Pv(DU13IZi~VYBe?4beSb>3-46R6=_RWXP(X2Cob|cuCnx$OI@Tx ztGrLGs?+HPx_hT-YiH1YBo^v8Xx6_yzk9oF+RTc4=||C(_Y^igZER_uGY^Q$Txe5( zzAT>;vWqhlERz#fH&Qo=PJCgg%bi7qaO)9aT}_dq;>*jAJdL3wG9f9~2Hs4-qzWCn z81Ecaag9yQ`UED#D-iNXlb9$BNSAGk@W;f`gv35@FDOq`MW`ZOOdVRW;FyXSgdbPC z%(Z#vH~`dZV}Uf0*E7DYMsw)v$1#2VoB0$@@N3l_7*@%I$!~6w6m6ge5Um2U-^B3_M;)JImWP!zUu-i^IY*`l(>Z!Sz z-Kl`p@|X>%kiVOy4|0{wPz2>lOiE^aUsaULkb`#tDPSo-EiQWrXj3EoSWTN^C| zV)7+>?Hi6yxd3HsXEPM0O?m4eH^GHJ>Pal8EI3gmCDHX_a#2AM$uo^Rx|E(Duqjv) zpSXjfm-eJthc_-PQ_oZiM3q>f+&+WNHhyhT$w|!kaoC83(*)P zw5FqjHJY4?FKx{2@+k!*KoxsCNJDIiqcd_lEDFm88lZTsHMh`?JOa?B()%oTL@GZd zVb;6+K?~Hrl%{rqYH!#B@|cr6ImNNv``c7jwGoyZQ18 
ziaxIZer)MUiV_KqY24-(>5@!e1E_?#l~HjPiy1y~+@_{B%UN=n=^QufN&Ppgc(ICDiC2lIq96|M@&kF7_=fpjdT_Tk?6MPfMPiL)5EW2d9Ys`Tax$$jU_mPO+Hl0qe4JF}%i z_NV(#{Oy)*DQ{aE_L#?SuF`K&$)~%r+5H)rD{T!i80U3332gBUi4!OvE>~B3BTc=l zJ54(ee%UVGrgLfev!Vxv3oDE62&h@_Gwv=NrJb;gV}*5T9FFiNKT!%F(s2oRiP|8g zL5ya=WN~)-US%-HznidjS!Q=Hjd!=X7U{#BWJzMo>a^+!&cGY3>dDKU^tXIAEgwz} z1`_GNP(Pse4r3mNzkDlUuGK!T7_dNw=8F?MA)utaww25`>JZ5HjzrR(eSQnXVOUNP zro53x;*B}r`o5nQO3PBn#T;^J^t1*CE|D#iKO&mU05SBxuZwg-8^(L z;B>7F>xlPWHaOt-An(q{G^+10h?7RKTe2`-Sf#G|<%cW?D~^^16L8Wg)q-st-uWYH zKw~7uP)2jItw`L;>lX8KCC|$0-(yfLSDQMI7|*I@S?j7G6TV zPVkTMn|_tU0?(S9Wa(XU{Ni8xRXa)@(H#^;3vW8G#f@jpomQvu0G9ztF1go##VxC) zjtk7uoAs9ZIOn(pUE>k&m5j!4$DKOm2|7Su+ytJ#lnd8 zPmZ@8wQ<*{EGUYy6g^mgJm7l4(V1q$X9U z(+UzCgZ;6>cE-0X^~L3sE1>R?PW0}fW^xC70@9pDWodrFZ*(WA3&UGbt6m~_9OBw3 z!h$LWW5SZp7eql;v&14}zmbFo8XQE|<{gL@$q@z05hn*C(Vn2}>pj=xZ(nDzX14}H zx-Kl8IerQR;xq$Wyc2!~47ZHhg%amAq3XVuP<53g%Lrqak)ZgiP9tq;`0+9)^D8PH z!HY>72}zeWwDf$j2l`+smT}PKlLoA!o~Z{oQ*z;VkSKB*rbz+j>*ED^`jA}IkIJEJ z_$&pNe9^C%mB}%%!;i ze_XV4Fq25S9PAD{#NyjTG_E?wqbe>S8qp%cD zh4yFt=NX@)Ik2cb?>B^CAm@oLn@?i-yM1vrL2slpPTRB%6i4v)`B#+2Vp;RC*Hx|= zM^9)dbna|i4MAs?+=3!RVCAF-4FE&2M?KD&h)-v}Mrf~2UnoZ>oLg8^c1d+iIFIi` z^R~E&4qu6n!kR5(n`I9vSj=a|-ZtirC8>`(egs|ea<5ofDLH8U2F4p*1p`ASRHbR- zv?WW&GR)deX@bn63wIS(BB~~rn)`^AM!l`YWzmKQ3ITZdg9i$0j*$BquSJq?M`jRV z4|6$>M^`^k=q)?14UB;4$T7=UIEXo(D0wOTZP$Wmq+&}ZTb!psq|VIegNMKZB|Vjh2Lm^yTP#C93CA&NE3Lgs1gTpU=CLhY zVtJiKp^2ptP-vjE$`MChd-xxIQCN4Bs6}(?+y?NKM8dvQLTv6V=X05_()pj;=IT zS>u+KP=bXa@PgjoVh%g;Gd@JW_$9R)ls)EBKWbBf4A_`JhPMn%wd*R33SK&baY`4vFB6&2mRAB_F8n<-7fzO$OMHlk=ucJd$*WuhZPY|hCVS16=?ndd zQtwwn1A7Er&<*S|3~($MrFN8Fm2qePb@kR>q|~I>MIU>0`x^GnwGX!Sv`=bnbUU#m zI^$Q=p^P(T`N}r8Vo}+Xm2obt$w4OI(%ZCi71Wa&<%+5$Wxe1Q#vSNT@>Z#_#ziFD z-2y~IZE!?w59)fcKRh!a2B(>Mhx&B9HJx-u{8hQJ3CS4frcEri2C>0&*cf|J+&Jal}7ou@brgRW933pPf%KLl`k37*Q}y<5_N}+Fb_e z4m6HzUhJx@_%sJ5$z08y9ewrJlr%nsj0Ex`w1e4|;(e3i$2=_^XbSi5F;X*qyl|3D z5N8~?VoO@Q*63@FIUqPdkff5<(7enrBaO{6RGq*PfhR0L&Ud|}#Ma~#hvjgnRc4YX 
z<2Fg2Q`B~2bWQWC_mK*4Omxq03QMojG~<&S^e9ACm4>QJ&q*7gWaIr727v?p6!{+e z(-y=<%J=)UfuGh(8;E^r2c!cZ-4iR2ViiaC>ZA-HX|!JiaTQGhUL#E~glKlQu=T|T zT*qGp+{Eo~M`R4R84g<~RF~=e)7!62tL-bcCNL6flfPqdiF5l4XZ76v#NkY7 z@*z*5h92^#rw+L9*u#go?}olbp7!E2ii;pjW*6_l`1DlOCLB>i99QwV6j4`a z@6X~*`16C_gg^6}@aJw5`XH?=(i?LZnFB-C5C>@5+&O}nGarB!qzERYu;;*}(1ULN z5O*ra>4^zzj_&$32eTh}#_VW8R?-1zVO5qEl9O}?qy_U491;({;f=72Dq2WqcR*VB zpc<~KrG=T879KcgTA1;)@IW;!c*EaG=**cH2qa$)%SZ~6G$VODM_;4 z$9UeD3w~!BfSK>+DIDqQFbTNU71EICynZ-32w~={p+>WdMe$jsk|0tUBnW6ESmgV_fzS zMoLBl&DXjWTmk{R2EOCG1gdYLfh~7H zOKP^;`2gJmf#!>*w^VldyKjVv+F2c^E23&8M9MlRI{@sgfPQvo1)Kw6YDGE|J1xMrcQbkB%!;bx0(Lk^DPhvkg&&GUK@xLM2)q*< zyff=1cr9mEl%y9`{p9EsLKB`%@qA4Y>z(J zDjbpIm#HBz&DEif70Q!t2Nj$F7L!qIcg5!ytR$9msaRmFkBdS35$M3nv>w z7+}k4NgFv~J&!aet4^N-CLJgXi>Z^;`AD_zG=zcGMzzc$xu+!X1WI0Upe#2{)+B5G zp>vGOvzH6sUk={~iKsZD>(K8Sj-CqNHif~?-apJNoZ~7CbD0Q z#~)*)PW94-N|Pr;#2_px??_ zv5o|ai4GorBlbRu6q*&!=Wk!yioAm(@I4XumpV6~1ON#o5y%W)I(Ox#Dx#PqBd2q; z(}A6vgG@|w52*63O;k(G&~`vjd-^d2L-b>6M7Xyh{B}w|Gb@5Gmd)a|LN0numV7S! 
zm1W4{p6~XwY-L-T;!Qf#Q?*G+hW}FMmjYMD zjh{!xye2x~mu<_RiXv*Re2x@2r%W75xH0!+>9>yGN)F!JhS%Qp_v&nSt&&)O{om;A znmN7wRqTi4h-6PW(V40hBdI27Rh+m6McBZhN$oqHR&*j4pX`;wBxk06!(j_Woc#}9 z`-iW!`)aFp@m7v1br6cEz&kl7mY8&raBB_T(BTW#i7t#(N=|4v1&3DDE&V(lNSOit zaDrXkvK|P7cf%W60$e3hpU%FvQmG8IQcy|bHUFba?=}qyhNf6h@DnIwYvYdYzOuDY zAN^Dg@QPlXe$Ohcte?1zFYjk%{bJ*!AJS5fsc}N!Htmqp{&I`N@gW$kqef%yXEy|I zfKZ=q*I-y1(YRE#p|S;uZOq>Ag@KucXufV@JmVwN*Ru64$Ino*+xr z;7eSy7TS<&HZwUCM4KE>U7+fx_fM)oLK2?y^pxo+GuPNJz_&t@n=)@`x_7E!_3^IC zlHOk!YDr@FW$G3Hre24i=Kyw3lA{w{HCyQD`h|B8{4(Pph%Wq>jvhNgWkx@&Exd&j z;2Z}2?APBKP@-biG%?d3u_?7_oNYPM7W=Z-AFNKAR>dvu$Lf@~4LoyY2tr_&arQ|X8G=#(#o^rG`v&6YD1nkh+zKjdKu6~20iggWs74kfpE zLbWQ7rp&s@9*9uCsfNXv!a){^TFp@ zER#ZhHz{mnvNB0@mN?=nt4Vr|%pj-HnYAwS2m$H>Fjm8pm$o!yFQT>6H&%<)Wh{vL zSU>7eK6tY-c(*j%v%=J&VaFWZg{4-WBOmm4I6mogWaE>=285eqK3SXDV0_Y%W4m}B z3z3z*UAj7)A-7MmvCU+G5_ezfxFtf+fJ>e2Tf04?pD`C zx7q$<6FgY4+P8FgwhWoKay99hD=FnSYRXt0ebQ_~Wq-t#gPiM2)?w;NTm&FmaX7bcaF4J7WQYAt0O!|1EB!TS2P*kJrd9NzXA}lQ9PW zmoyO-hHymP9z9`IaeZ1j%}y}>rD^Iu$;(QnnOS1mKOggnQcjYZvkc_G*49KbwEP#* zfa5e6Gr~Q&0k4gWihr}!8P znmfPe!jCtCLBN_nKpF8@&NWs37tq@n{YPVQrFY){namaKTbV1eFNFfnN@0nu;d^E< zzK;%m;0wdqId4-GIMWUAumF4D(K&3u@3e7&E?f>(AE(g6vbCVPITh$NaXCVQw*sTBjU={pSd<;Y z=S>`=nt)EtGCUkaZhMS5j>%eeosDEw^FnXPyOK@pCl2bWhJgh*+?TAOpoIj%pK6Gq zt}yhEY~-Mn9JBD@I_Md9?mHxt;C1S%Cb<&FX?(RbJ2 zi8t|-;GwTz!pM29oln!aW0?1tz|`n4!3-v3SXmaR!-PVU9uY)s3=xXwctmi`fdi7$ z|3=L`>gy;cn2@In(>BG#j_$db5ImGHc8{t~G1p9fqN5sQKHSz(4Mt*zD)+H14Fh)Z zfmJQQrO+0fD4aj{EM`5+Z6orsIE)k1qn3uUUOKJ7&9bSC{?MZPbyhN~Se2X9~7I!_)B@?kWec zh{;={=u3;RHtWXcDSCla#n3_o1(7_eJU@F(AA{gu^qJO4`rj?5RJ=TkFxw zzgbvhM~mbklIFy}Z2mOmZ&vXgQ!Oc%99Om-)(oYPW~Pb+vGR*Nlc~P-I!+S>v3{sh zG}&p#wXFW4FXT~QF`khNB-X>>Z`Cp}p*^k&S!6{^-d^aAMvgAF^s-ZcvxMIP zlfkLa*mbP1d{Eh+R@U)?-^M|E1yn30T-`VwIzBHqUqO&pJ6>WAJh@*RqX*XDPqDyD zR-2A}^G+QWd2%O8e6tZ1^ij3lC1r!lO(6s+1ItZ^H0){bW$Pv(6Qv`EGk-^!%StoUM(sY9Xp% z|N9m*aSM|ru_pdt1Ikub6W<_~ml%7@i+I*6Bz=-dl|>D%sR{-S`J{9mDa~ZbNHwSm 
z1|^x|sE$(EeO*nADa+<&4Wxk~jPt=yt!%0(F2=z)JK<@`nCl>MK55fZ{71TwfnF?F zasJe!#+n@rAc_O~(i~rCN%?|+x6qq`%cynAI-6S&Nclgq%+1G7G?Uaz1r6YL>OK=R z7$Rse6PGGPe|DTf8+;^NwOq0#;S1o9UZAgX%G@no#~1^#t(Y&E4X9=^0yR7#F?48~ z9M6d*<~GpuKJ_ik|7?0_W~w!=ST|#On_2d(ZQ-}nFkuW#z_OMfG7do+^Zs;ur?t#I zS0Eoqf>Ufl>y7V{e#pDj_UjJSBcmHP7Z=&VEC!Tm7rfnV4GA;jR6gqElbc+W0P@9R z)#|oz;a0ffd@8;gN894R zSkixG+0VZDKnJugf7_x!sRS?dG?#ZT*F`>f zr>=@~-sU(>O;*0&6S@FXbcft-ER{veunxbTcaS>ilu+n^pX2IpST6?3+DqC@0}(t; z4*^MyzH0c;avFFh!v^bD)W#4PJOHzT3Hd}4?p}un8l-(R-kf1osaeUf@yPj zt6UVl{?dkBx^;jSCIj4ChPZPStLAHm1}%Aj85?t`ALxB))q;#=Umde-&Jl4bcJ^>$ z^s?iMeMeQTv3gD7;zH@ZX!mi&Qdy99DZ79}ei8k9ePf{9VQ#ij9-9#1=qsiWUODg@ z{Kk4W8|&R!;-N`QQfTCWE7zm%iG?(2bnK-ETvwbpkUXTfePe?Gn_WYrl6GIw(i<30rZZ z$Jhu?=JfKsl!K%b9kgkn7FDrywE0(O{OQlE%l?hv_2dfaruQPT5Lxl`v6a z`Q>;jTg891#(%jljG(5J*Mx>@zaa8V7|d5j!IOCUe{Ze#8! z^mG5`v7GW+UOG(^7)>W|S~!xy#a)T+)5~ExJ?+b`~IfX`saEWQ+Y`W-Fb(T5o< zsN%yPfeqhbaEz678CGTxZlNfJZ2)dIVgh-0GdO3gywa`}KyTtWyboZ^;FMuuB$&5g z-~kP*d~8@@sbmZ*bB{aOVgyk3f}&QSL>`ewnAM1^)#D=&8D$aq$8!0Lq>n{kJiU1B^6Oem9*y&52R}|WYzx0J z8eMrt@r=g1qrX3+cxL1M(Ra@%Za9+Lrl!=%lr{IwrZdg%|MzI)ldRdFJ*jwR?*6vu zV`p)n+$Wz@yri=rLvpv$(0GpIdY43NpIkh;RM3tjqRE#Nx!!JipU$Q8nAs7%;K{`k z7Zs`daz^ytCl}XWuDTUOXrzO5GhNL0rrwo`DutyBaV>MrT@eoz&r;`Vc%uIs$4x1J z&DE zbKyC~Ev-L#BWF0X0_ipTeGXr(zEqm9j-C$RZs8>`-PPVwU6*iH;|7Bbovyk&8; z`P|}`#+#zc&n^C4V-WrN+;q@k0)sKdzAt*wc{b=x=h>h?Jg>MVH?}kCI-hTEik^Lb z@!hnZKEL>aaz%rV7qt$NPO=#Qzqe26&N|(J7WtXQYJ^qLVU0S{M#G)GH0BZ=-i>L# zC8qh#zJk49NZ;qYIGavY+V*ItL;K@;5{$o{r|!0|EaeH&@73of_Jn?CZ%pXf9Nw;~ zHFxW`L5uo>$j)tL1rwR!&ZRYQYYt1NHBhsXmMl;uDD^jS@m+eVyqrdx@;wbvR>gI6 z&zz3b!4w@(iKRK5p@C?Kc&+46%hN@IGfUBZ3xNoriYjJ#Szn5We|s2Xzu8m8{-s%} zu!fao(Y0q3SFWF3>M5b0E1yzI;GL8bc-JU_FC8W9c?Og)fDf>m5(bwUB@FXJN(lqY zJSB`Q%Tj_S0UujtlrX9mbS6HwBF5>mEDgBVN0*5Pgv&k4NF@_8$CuGg$n4@NWa_g* z&pZv8da~h{37Ps$eo2irLH38oC0_ojl{;M0q2ZE)AH(_^O4_BTaLKbAPWzrnsP;YG zKc^@4Fij6LvU+GfsiR&&UOixOXlFRg2f!;^13e}9$Gyvw=Z|NES7AGTg(3C zhRpWLa*xb`Xy}6CsvO@7ac#V0h--^Ja{&_h=NA;0VjrgHqr9o8cDbQX;e;3&h$0gz 
z!^^X{)0mhc%MEwH9xKaWkEO(u%XN4P_9J?Ntef@ZLeAyj0K*DEU{<;@_`@M#e>Ctc z@*_u9=vDk6%h4D0Hz?k&r!r=EhU1)=((EdD!(GcgE3)X*+|ZN2?b=ZG|LnugXLWN;(oQO+F8m$jj{2LkFc|1eoN*@hTmF`vnvc49URtj zJeFf5Z@aL#GRF_XM)dC&7F$rNK6hbpW6?a{>>>=d{jE{&MU)_0d{MEpaeq|0sCZ;~ z))y@_)sOrw4vseK8i;t`Jt$Bpt+v@J$g}Pvf`1`t1ip)lHYI9@F|HXcqE(tMM!}_g zZ!5Qiv?^gd?jP%+S;eA#|K-32;Lat(c;NIkrRB0$ZB)zxK^r0=6N?lbMjq|XDj?dp z0`ea9%=31h0`?eB%LsV|hivs79qgth$C)Phh?3adOL99blM!sc0wPjF7~r1nzGb$w zW#aZ4K<`iSttFWnJ0NIe!|hksa)|1QZ+oAfyhj=y@*PO3Ya%tCmIyBB=x|G^)4DGE zzDIF*jH$sMtqRN3v(LH8D(Tz(9=c@|jjT$L3M+ITRq zRfvZow^%lZBs<*f2O8Eh4_ysu#1TK@5FhqZ-?i_kku(U?Oa{1ZOs~2 z8`mAuP>!BreRFtq?T!zjrzyrUDqze#`df;S#%3>$>9eq}&$E8=Bdg=d?`QluJY;q? zNuC{MGE&uKx8I+P%^^LWYXw>o#Xau17YveOjyzk&n*piuaos#jnzsiMd~fT+rCaWA36(a zdnt=m8>YYHbmdAZR8lexl5wVx3S)%z)@dYrk ze@9Akgm}?CJ!b|RbN7_`+W)b4h;~T2k`v=+>(n(890%`E?%9YfBw5WM+}yCg)VmB@ zm3IBT_ReUpP7!*F*7w_com}U%(r@^hM9JPhu2muAUg{QBq&$Qx;DNn7j|7)`0_8nC z&FeO%FDiZNRx~-*8`H`5a$jF}Q81M23jL+O&((XC^tGvS!;7ulREmK<+XVV|BZcAk z=P0LA4EAMl66WDO-ksp1%G6BXI!8T7{ECX)u z>(gafEv1!Oum;fUV|)v@q^VgLN4nwms%yCDq@Jh}ck9V}6MAp(NKfK)jR+`9QXQF9 zSK_`00pyT+9Hs@tX-glse}|4tWx&mI8pFnpv{j5~C?^824NQ2B9$5n?1UNEahH z5ysrBF;8XodV<6W{l+TB^tp1BoBi042?md<>9M)QxuG3LxehBw!8Z6?;2F~sezH?f zura82%J7o`{Q++~3Vw3Eew_593>+1YvbTDaRJE97fJC(*e9`z?NFHx|Lx*S*A%DL* zXOL+}Ut0%F&vA^wB>ibKi*NXh=mbOz;9zfHlEHnFtK@=oF!MADsoLu@_XNduW)dNP9IN zi<;z#xI5bT$Fv`Jl5wRfZEBi6+Q-_WS6p7~TEUol#Zd$@ig!qEiB9;F;_?@dtfxn< zmr3r_->T3s$R8qs0e!)66@4CG@0YheM$Vq~HO!8F#@Cw^n&4#+k@B>G=%as9T=BfS z*9W(mBj@@JbK4GV=xt}9Dj_?#A!Wzk`vfu;rKZu;ybW2;j)7}vgK^_$4{q?>c(`_u zlp7E6Q}8ie2dzwX+u`OSq9bPzSN#&b<4~xlNu5l-pKAyC-|c7(NkKJIvU#KI+{v4+O>&}C;{NTf$=68 z+Jpy$-og=DfR%7~Q!0%|Hc1-Wc7&8}(V%SpQN7OR+x4u%^f-M8n9`J zWw%%|PoecVu}P?2)waoFdNR8BisCBDqiwm;RHwhWqPY5L%TNFwQUJGChD+68Q<=d& z_B$dYvFdh@A1pK0w2ig0eJUPo#tk-YgFXBHwmx?r(TFIXVmh{ryjc&QO5?U*7r-ai zLl8{>sx=8EvjDWRIa>SD;t|Vjot4c>e{BnHYql83K=kD+i)+gLn|WQ)o#FxgDFmt}wj0b^r{%&1|slu<2?@uXjys@bBWy-XiLPf*j4^@#?LgahC*la 
ztbwR3AYsZPl(AQx^1Pd;C}x6ZEWD@nUGwIRx!dUm;XTD?r19onDf=^w*2Z&cbHds; zM-RNPSmKvHG|yRHHfDZ|8(;@dc>wzL#B$iKC+}7C`}i@gv&u2X%s>5tV&4{xHGT{; z(hHNk086)Z&f32d$E3`B?=d&Ls@PN3n%HQ^*!-r{WwoV0ear!&frK79Rx3ueW85A5 zEeYMPClIRWHzq!#`HURvCOl&k7K!aXwtS%8k^0B>9)SY5hVU3sMI{P2sv3BTKGWyG zXF-zAN%{hKP3Mu@=W_*?@F!GfZ)kXc{#iIXWh0)_HR_q z4T-%SW5*t7mRkELxfXl0R>`Juzv$Zq1VnQMw1_|M)o1zY^*jEY37+DZw;v0CyjwpG zZgCKrI5y^wJGaa`G>7w`XN^9%MVf{#T}86FSf6g2xT?39RX1*C8X9(3PxC48$MSH* zk2$m@rpet~qRNYkM?_crc`<+9$QEfCqgnz>nbDJ_?6E#j8uUao+^#3@jp_Hwac&i3 z^ypj(+oSS}ifgRHjg{jdkK=f4fY&f*;XC}xB-ql&rQ~_NH91EuJ%8S9%rJude5gVq=#x)1{`7f zd-b<4J;@&;^9g;S4P+e&5ZPyR)5!!ri?K5k1{mF5uF#>7@X!6E%23fUS$ zX{2RuMV{~C%W&Gya%gKj%lqAU8>Y9~&YIC2!#E~SFB*1+VPWdCTZ!On^v$#ChB+G+ zDllfvK=Sb;pCPix1~N@(`|*7pQTfHil^a1aFC}XaAzKKyd>&pLj8lH= zGSq*3H1XPEUk3eW>nW6LG;Q(|cxsdDv-Ds6 zF3U`F>3*|3)urFItN4li5_`_CuYSu@1$7dT}2UI!k1o!b(Pgj&9IP=Ue`I2zLkk) zRH~ibP-b>-muA%A$!yo$*$w;I4aKt?nSXY?tJyvOfU`^I)ycd@nTnvCYE8G)_HXDM zWoFO{fELY+M{8b6PV_j9+oU(iuAi_)aXlXj4X1BU3fpv42@>3UVy`T-fs?e&rIPU7 z-6wh#YVf2ithG79!nR(#WaNsUB-0BHc~WmXnp3{C$U4lkkuuSEU!Amc^gCk;X@BVx={xTW|t3t~|kasUH8pNCyk#39bVdaT$Mt z&e)@7Y8e}If51O&xig%?H4&Znvf{cwVpB~%fmf#NReAa*pRg^seX$gU=_mB9Ew({! 
zv6?l@nqGXOOwVlCF+HLF zojj#e4eH4Tzg|z?8_@5QzO#Xol`MDhK|NITXBHF{>Ce_~&zc8 zDGfEvP>#({@z)VPIb!A%DV--7sCaXhH(BT@rhHbOsD)YscG;bbh`T}nL10;Kdr)rm0$6WU zt=Nl~1#K58_NSH!xM0*3gRY*Ss{~jjDri(JQP&cYYSdMuQkzz4W6O(1iyAB1RQbvO z{rNu6Iddk-vWvFA{@E9q+w(l*W7pd?0mq<*ry z(+ek?8e$r3&=G!-d$p->p-Ep;_RS64!%c)A@^Mv|1Rp|ux+oZoy-bC9j zHSx>Tq?0rSZR+nPr`dhFm%6qbh|x8k4M@I!OZ7Yh>H9P~h-#YG1Ru=lJBGZMVI+0& z5vgm7P+>IB(@g5xl3;{~<3}&A^q(`or3M@=&{l9WehRNVXBV#~bm^64?bIvn2KD>Y zDQ>L6QxYKAAAkI{mBZoQk-WHf(`q$CEAED^b;15lpVGMa_p8Uo`phXdyIGC6Nai}_ zG510;*H*XOS)RCy_a<}cmH7|qH|9OBnM`bT^PNxUyKk!-yuMVv)lMUyZ{gZEU-~I~ z+=+#~TTX77)MF;Ma_y17kbJhsn%z=VrkaoARZ6v6&>NcURnP`nyAk8R9?XY zL%a$P4C*uZVV7Q=zu*SvFSwwdZP7zpK?ytc>(r_Kv``_$~wVESoj2f7+4Vd_-f zhBAq4xPu-uEH*ATF- zn%tJf8MJM1UK8;pSDMu{f_+ z7^#Q!%7HO7=dVT+U(kejGtYNk{Mr{S+LqJINotSKXR>^{k%#6>#?iZ|tftp*!`S*^leTg>*Dc%n728EE!B) z7w9_pG`b8Ws**w8q-z;^T0Hxgl~pI33XGr)vt8WpFVl=(rA(OCeVO(5Wt#iq{asF# zY||G{|5c^<3_ILY#R<=;wGQ|+UUk4W3>&RMrnnoNj7@Nt&eMHC^02b)rV9TDUj<)*C7t*G`^p*mp{!rkaJR)179a z_J_r;s1E+>aoSjze%-6dXp3HmC8R-eOJ!#|&<8()9wM64EdWLBn#?EjWhnhjs=4ebKCVGc)J_s(C=pF7n zBR>49%Hc;&Xyj^vCUPTg(m9NukswbuqkvP%qRnrK0o#Iv7l_0mqB8$%#iJH;wpiH>W@6xnhb@FGRV9-F095IlIhM$uM9jxm8`f1)j zL%NC%|A2+Y1KQ4+Kf~Zv6c5^J6i%#Zr+1uCg-w0UVl}k z|D-`ftl@uyMBMCO2PP^{emL6#lqa;cvh{vdq@|YS6M~(_9C)$Xy z_!C!Ej;@=T9>uN+a2+(7hpo;>6F-6omcv__Fu{xXnFD{`wqsgg#-C>=7b^4^43+_$ zL6u9vY9DZ&GErR3!$d<{>1%+5upABZApxY@K98$pyVor24zgrQ6NpBr7AYs>;lKmX zD4Cv`MsI)-ZI5{wI!HQty~kpEjIGIOK4fw*n~<`ojUoO5XXbeAqZxDKok7-x z#^yLd+YH>t*Pm7?ZyHY~JbB;=Pw-Rxfx*h!BF^)j@y&yfJP@_Xn%A`E#p@KW zaBUw;JZ@(e56lfw6YJobcZT0H8}9>4nsOTgNjU?#+biM&26A^)`q^-Nmi^-vBzFzu zK9W3cKF9{Y*upKu+d4oDXr4UoktI!`9Tg!?$Y3CBIFS2j#k?L>PtWb$Fo6|4)p}1< zyxjA(L)}@o2t5(yWw+^ZhUum~(KT)J*OZv5H3k_8tkZnBTLPQB7xwW~fALpv*(dlY z9C4>!!5QOv<@(icZ?x;8-IzW)akj+Ay=n$)M4lLvhLR_-KX?B05z#sZelk$ z;9mVU6uJpdb|*MsS5sVt?I$cpeKJ%}6g%L65xqzD-mOUgm_PNp4mzT>@1JDwxvO`)G~2I~1j^_)n0hVSPi-bYLO zshffyVKmh(KJMq8s+N7m&PrdWrL+`rk6*g8a&jF{YT*Z@?KIEmE}yM}ZC}GJaQQ*V 
z3FcEBB#D#wSy?tMaFHRB3Pymt1-lw>cS6mD1T6+ATeYi{k^g`B_1!F4ov$wK>xd(CaY@FT6m5HbcmZvQgzFTaB*Bns`NlPy#DN z?qYM5KRMlV_QE1eZ7T@-x5O?=;^M1|Dd!4{(*{lWv=_D^Y*14g3^vH3vU%j9941;D z1A}{>z9OPNpG(J|SuLXzu}i)Nx8^i{Gs&PM8>iAnJ>S+Gzip^;1Tdv628S6;@kk4D zXVWvQzUa-+TL)xK3kJyif&nru*u~x^3w9B=K#!FG0MMkP_fRB6+XQcxmc-!A@d6e&X`|=%ib8!t+$BW{dzg*dP zN>Z8!{!KTm&oq&i7)H^14nUs}UN4NN4R?uhEN1>=Z-vhiHnV-zcH2dR1d~3T_LjrL z{WxzwwuSGouaitd5!ZN984t+qv|F!CO__!43j2$z%P9ej(e`=-t!AT8L}7w$X>y5i z;v3+1v&khE=a#rgTh)xVo@Y?FBnx$)rw*Ac)JZafwk&dGJcXT$dGRspoJwCf%7ZdV zE*8(@!o~AwDVCobe<5Evyx&fvRvgH+a`kyBlAT^u$^d=~xs>CJcM`8P?+eAUv_<^_sGFDIE9gmqo7;N^rard|SR*PbmzS3Fk8C#R&k;`K}(4cGL2d}SG`-xyAH3^%O z6ryYqE8rlI;~{wZN$Q{wMQ8q)K1ma+%BkcL|yam zi@x@KrnSjWYevil`G;jU;+O<;PDANuAZNC6KhO~$61=6+rl3tMOc_k0;g|cA{QFHeEn_xy{Jlf4WkgC zrp+P)?Y|ms9MWB-nt)&1Kt1=?UYNnC6BSH3I&(+VcFsE9Hbc(JbJk&6kmn!&iFSy*_FAfPF{!W3ow#v3_bN?{@%*A2$~?_w3*UtpdUmx}=XB;dmkXQ+ay!>T zHtY-gfm1khhy9j9RQN#+rbHynQ+VqY3?^xdMj*WtF%zu;DVNXA!k`|CJkt9dHC*r)-cGV0jV~2 zAa_r%%Uk5K{4YZaRIp8DSsqf_81&S0f~>R{mM;bSbQBfghu8t1wwp_h@D~~uhUXyC z#L=wwri7{OA}UZQ@hj|q{Qcc9GCGH}Iub`G3TCOh?6k5c+XE)|;4OrEn?D6#P013& zENZUIoVrbFKBndp(URcryh9uuE2z71>k!G{pp_je64T zX-b4NO~mZNGN(&ci@x}zx@V{)^4iCv~2#7br%BA;!OGwE`FydM>3wgOpZc1gP2 zOQ0cwou*knqK>^u25wq#2mAUTH5CWxatA>O9=9A~y4?DsFh+e`&_R3;IJ@L3>e@6x zLirFNL4aTxWLfnP8DvxHa9}`K^=sIK|7VcUW?7USS%J4oOD{IXy~~T;YR#pQ*!@hw@JOcPN37$XF6H0Yx$1C?ChR8o{td)9LdH|spzz`%PU>ly}o*rwo;hhV-w*R zpC7p)M<$9JpG%#=*?(8u-4+K|U?@5f&BU)2!;zB1G($-GLsEJgkH!xVws0!%r>NB) zg>n`l!NEP(Gm5lgt~TkM!^X-=el%eGC1v# z&J6A`QGJ^ZjRl(i%t<{Cc%A;t(z3G$5hcb4b$2K+KHjUc9L{VYJZS>I{vx*}GpCdi z+7Uh6c4wE45 zMggn-);b}PpOQYX*~lmh|)QjJeKyWHo{>8c7gKfCP4 zp(uU%#y{@&_g!D<*4A+N(3$bsWNdef9Fo-?v8rTVKR&1gTgho93h)P3vp*mRjJ~U7 zUWjJmGjFZ*=jY;=-C8-aNw^-ftsQR8gE|CjOUCmC`y}+~0DW2E`tNV8Y&>FKbb%vL zt#g=gP7g&L)np3WG-p$hX2ff=qqvKqtDN$+6X2bnm)F*vs0$yFsOZcFBt*09a~^!V zD-2^Qx_MtBZy_V;9-JG+Kw2%HRXn+H1TXQ18@k#h_ErF%91=pN2nV>GTVeQTxKWi` zOB_tNTwP#+vjm` z_9PbmQb8D-%cNrTk}j(51D$>|Us(-8if{fg?oEy27`}q$=&JbqpH=$nlJ5LvGP-m# 
z*9Kn{fh<>9LgD_R>&gprwWE)=PP*(#C*E^WMJB{2BqFi#5J=12hS)LC z*9FPTz`JZ-5+C*ojCm|VlbfY1A?yj*>EkJRIP!z2g zx&&d|A9aQ3Eb2;`9*{Lq+1%HyDDBIRd(zFr{Wza=drn1NB07NqA+4(Kt8h@O)WllTiC0dxDkNT|P~`i!P&CpMfU zAG_yG-CmH&S&X53zR(hp5KW8MhBSV~omw+<=-M4%W2bIfYu53zlc_b5|DT^V@V@%c zX4(8;om*e{=>tC6p8WKsPd+bG8>DZ!4EGJ$B(W)qtw>;H6s%#4vz@SDrK15W+OGES zubZ}OJiA?tf-2RYINSd$ZvoA=F^D|q%AU4#W#?x1w0UK25|97no0rmj03DCH`Bkpv zSxe_=P-^4E;d!OL#I`z+V_S~RQw*!!@heGsc*E|<>f>T_w7vd@94t3{wDi#ig*F5T zH=5do#V8-Qe6rlPJxNY)#RG~EPGoyaV-Vhj0$X&06z$oQvZ)-*puBDA6rLVm^s&le zHGAGsExG>NP!&57x-~k&WK|m(SK|JTO0C;W>fuB3@-1Ic?T9}y(ueP2TcM?`t(ar< z>TQMa*4|U|<9&!E&T_z4f4zm>lCjgY|9o)@L_VU7AnJHFQDFo0BmJl`U%Blk)&RBdecGvajraC`U`(=((1RhX4=3Z9y=NUOQI0%^aM3UlG-&~ zKIE#FR7KEiouJkc-eTo}opoavhm!X2lWd?d#6Sp@-iuw~#1d@k%P~a{-i0EP$V5;A z%9q3iqX|hFzBNh#*1Z)v`xuTFr!LuzQW;Qk7c?Gyu(IYEZfAXZ6VtrRa!DM2d8|;#bE~bcje#45 zYige&bNN~Eg`ZNWixG)-Fq6Lk&XIw!yzc1QLK#6^^MwLu#g7tGP)Oy29UK9fU}~W2YAlb;{OLu@kq|6`4E#zvE#kYJ14432VkXQbyu^8 zC-nrdcaT$~(Lv1`x&pEv^O_#oZL0QqWccdecX?=VO=<$RP$c7umRfld==Y8ILci|0Vo9(V?$)T>XDLa%U(|o?uj9{jrUnz=BQR!-j;}=rg~e(C5m;(4|zCDgI$42}nSF5|Buj^bPe~-{{Hm>Iv#v zzH5rZeypx(bx{+^Fqby&K1U^lytwW!Uzyl_K&@3L%!>BU*BrH)!cXnf##5&q#_(YM zmv-+R5TE$?`>lTFwOIqlX?nWGS-d1+geH<~rnNIXMQ*Qd5~1s|O2#J`VXom3T&(pP z7fRWf^lEyMx=Jiadwg}cp{E{gf!D6C>|u&{$f(kMA#5}v>sJ2W*W5stOe67_w@ zfNuN?Y0b-v(Y<%c3C{himd<93+1!q^FBj^gROzbtr=P=D5)~q2jy8KXjKCGK?2m*S=fTeaA4!C`!2K(ctC%PcF>H;ORw42U9uDx$$S>_ks?ZckgSqG>Zt?Q;5W zVQ1;n%x~RPNe!Ah+{ann{hZR8_#gLGR%CQ~(j#eWsD^CLhnaoCLnnxCM^KBtBZsnT z_!-~&Na5tMwSM#v`!1)(QEoC zVbma-g`J#@L^5}|VNHWNfASxkSHC|mJU(?^q(n>4t8Nw)eCF0f6WaQ6=D=|5ViXDz zgl}%gZE0(6N`A{zD|H0W00qlCM?@zRYI8IbIHIG|6(tjBI)HstgyG!u#KZJRJB><< znC4dVg>;u}^lsR&S0xXp91BZY$xc$tXlqWCvQT%^@ZPgBDY`R~F;mm1d3dg9f0w{B zuBy@4zp_6yzdyL1y8WSekB8^F_dOj%vTtk+wu+2_wmtl;T?d?8)!j~M1Xao08o?^& zU%7^{UiF%Ry=|UjW2^jJzFD%P6!yXZMy*J~7zM5xpi4k195oYY{I00bMV+TL4y4Qx z)t23z4oW1SGtB0rq%IYF3#Ek^v;xFxtT~M(h;oW!oulkl8idRQ=sdJ}6$2^U#-rc- z3tP51K>-wYVVn+9za(2m;b`HXi7@&J<>u2(WA*|kX~aL*WilD3_+hbss=utqQ)(uK 
ztk6N$e4Xp;_RCCZO`zo`(ga$Mc+j#(yKO5R20D#CE9|w8Kh@GY_sUMaa3J@5RTg7r z^~|ik;%7xU8g%Ydn_<3gixY@jM$ zZCrq}?|n>`oX>;!`{IW!=y8&N_Oat+llM#1lVtu%UOT9dekJ^8?p)f)N)}do>)cw3nCys(SBCo- zZF9?1n3yNJ1Y7;rxpJ9u0!|HRW`j*c6b5UipL5?Of!mY40nJ4zS+pt=nd99C*NQoi zDX6#_rbPqvC4TGTx<^gWFleC^zm9AB**2IXeViH={^-t zRaod&eK()1H~bquVqeUxHd8BCZJDhW`_%vS#d0(sWc4N$Ex+8SHZ8Yq6%w}SySZe! z2pf0Xf*)Fb!DkX0QQ2y_6$6o81J8|)G*im zbgFWcQ?6r6HggAF9d?8GY&S?cDt9+fJ!PDv6BjydjGX73T0IZ{d~;UF#1$cP>rIe!w>N7LfcKJ8 zkGYy9V2PC!hQTlx8*AoNZD#{lv5&IDgl~InE%UKWGby$T|D2HVjrS(W(>oqGRxy>DCdxl`SAGyIg1t3CXw7BGyb2l^IN8koMI-tPY?UH^5)HgzkuZ-G@I zdaL0r7K#U^9qZzIbR#_KjoHACfu+a#lq?l|WrM_P-gZG67Ng~DG{Qhw^1sN?k71oT zy6MTq_!HkVtVx}3Lha=X3oKcnDtyL_6p?(M?lyP1r>ms3Ab_tF*T!orIf=AoL@GqKM?)~heqNEo&hcJOTd6?><&Sqa%W%uf*;Cjio1C^w1j$YBCqG3ex;jy( z)^kmBO+pU%*3QkQ?5BI(d{&uC3scR%1xZFA*Z)?q)*n)lgLn3n3Y776C zt95LisO>fq6Qcw9mtK>%3% z3)hv{sfcvWnCiK9MIMm`mIV|Z#x%L)K1jmnn#CWxr?loMZq;(PXjw1Q(eF@-<##ql z4*J_-5znZlNOP^*B1CLjGyH)*#n}m_q$Eo()stxUMkMJpWJkX)zD-N6l9VGl%E!D) zCJi3^Drx3*?emDcOPc(LzQvX3O70R@qOp+hPL~bK(V|7G`IP!SsnPF^Zt-cv4@sL8(Vb-oEz;KY6NUdZTG zQ914MsIHUl`t5oHGT5tEfD{S-fOlWdK}WtR{_A^6-T!~U0WK_0 zY*c~+5}MhB1J;5AE)@>QVu*v{0B+L3aez^P-wp@-&=%m)iL|^fCS(&L zWFm($-gs8n(=o!MoIb_NHy6uPLqk*b>&f-);f-`Ps}6>Hu_`$1r<7Ty8#5o8Nr0XT z0C7=b53O``7M;q<{)?prW@bi9mSG%>{YPO{JPnm&$}Ms9#d5F3OLnIkI1>$zO+>?I z7At+vT`)bk_alIJ9~Bdh3z?bM744N36|pE7YZ(o@LzG=1BM}dqL_D_QT{nNe2kpif zjZ*$S@wj5gI9G(C-bDXucWcC=pB#?bBL)SI858kh?!y?hB|OV=Jbe7^`*Gv?o%xXw zFU-zJ%A5(dM{=uiK+Ipn%LEh27&Mx2@1qCj-c6tuh&!=5<=!p23hIW*pgYxdABcPR za+ab6{q*h?_l|3gv$Y!c?yRwz>tgQRrc6go_aSvmT_Ko&aS!iEFvsxY!5q!pd;HOr z!HT&4eKMB|Dd^GwKB~6{nC}bS1-0jq1!2O>k2K?+V&E%=*K%bCa{EJP19n`n7zT?E zGXO0dx6NePo+7yf6VcEkxw)0Ko9YEQBYG@%r96fYK!YFaiZrf- zBz`BmWEk?2ckS>R>MkyeypJdvUzElXvj=_PLipa|U}W~-8_%eS_453TI1o$mWUe{} zr8O7`>}AGCn0Z>(XS@a7H)dXx@o+`*jRbljNc={vO?>xkrR!D3(Tf+E)X$TRpV1-c z8)P*yrTak>j{rc^e>1B#v$QEB--z>;MZM9+Ne^-N4=S7c-hpa}P~hl++-s2hAIM#H zX_UVr8oxfi@CQJo6i00|pu8STr3eHDt4mFU<0t)Kjjs#0i^I(26a$(&5Bk^CEE=G* 
zAdWsY5}|I7`m1f1CQe8g7tQs~?O|3G&~l8efv-{$-kN+P%p(lrH;*=W6CM69l?@xv zhmv(9X=k6R`eaFzGRY~N`o&>e4!Ie{pyF&oF{VqBEM$6SzY_RS361cjiSahN`72mB z!vw)_ncb>6JBi%_h_8I~+OQ4~cpQ{zl0wJ>zj(8piI=uWA?CgxV82Txr_30MK{LAJ zJvb3AH->HRiDMJ&9C(wwJ~#UMd%*jB%M6lI zywBXo9xb;6>HS^W0!(8&AZ#Jydh{uQO*QivNomq_DSa25jB#d?^7BZ9KAK%97~va@ z?+WQi1h5KGFi$Wi4T%pkG!LQp5Q>X5e;lF{trhU86gFN>5;{<8}U;ftQ=Lj63cYL_#GX;>Ol(fG;@5%SboWjDbP6GDYO5H z?0Ng2w%@z^>s-#$pJ%4FztoIJdR0pm$*+$qMdVr90q_vOE@gGH+Xu28Zx%jH*I~BQ z0pnb~6-oYwX*>xV?{a|3?GU4z_#q2ZFm_c78CP7B*YD%%Rt&#ZIt+ggy(5hUA*dbr zifM*ESC#gKfV1%3-Y8+w79BEyo$sNQ^R=v2YFSiRnnRk3%CL4#@q_Ms;QZyzP%Lk< z#l@8$W>VOSnS&~d>_)4Wx8J|AD0R^y`@K~DTCEQeNh|>ZG;O!tF@sTVB)IOYJo+(g z*iUjZ`&DCRrG}ZoJP9o6f|>0a*@Kg3%S5JIov}vvgRq(V^VifwDHWe*M+2G7`H9UR z2p*11;31y*(Xt_z@KW9<4LV=keq!RBDRKL;rYnV~SgE`#VR2I-95ZQdc>@ZQri0)~ zyul_m^lP@E2b}j4-G+YV-yh#mFYBuDxQ;rP&TafadwN8&r;S)qAX7|ffBgA}Drx?c z2}xwYCKt$d8GoKDV6DB|FS})8Ka!$uUuw#g#Bbm_-fI_CI6F_Mx&zn5>?m`ifsC+Y zqOdn0w?rwUB%BeR9_@?&{Ku8vi_N+SLn7-UPh%3<;2T}mFQ6W@=o?+8r6Q!ToXnQq z>eHU^%kheZ(i%%_@Vf_N7|RUDW~K-%4obAZ%$%`C3eiMroW>boBnihrl&N!OmXe{c zP5=B&9bIDkk;&5Rija92yp**YQ!ovCe$cQCCfAsxNspT5uj}7~8+c+urWq(R=S9+^ zn>sC+c67`A%Sn%vWV4&n|IR~|9*Yg1`L5%F05$^I6m_43o0UDHb?TsG!H87M}eWrs$e`-bMdWIqLTlUnZv)Nzy+{Xc1-i3A4HK zQs}aVGo0thwo5fFZoOx7L=Vh+L1!L+^FJ!9{xl(9e)gU|33x}>gr9f}YT)2I{J!no z0ZBl=Tf_N3$LZ*|Tf-JX-GSD?(wONM{W&dV*AB3x*DSLpm?uVuAiLF|(A(b>2Fk?% zF?WPJAA6N}aV}=qj>v}pmqz6&08%E@^5%zeO>^2HJzF?TxwTWy4YiOl44TnIC~J<1 z!_Lh#anK=yCXF1X6be%wP3XaN?tJ%cb#r*Ns*+6SF0Hl8$`U;hEo3eaNVi8VSHxR> z!6aTR6EO?Z^@p|>!d?0xJ(DpdCTg2#u16z;josB@KCwr4e_D zwiF>_Pgn^;WR19zcMyj04COv`Y@1dNlE(4^H!#QC8ypCpKJb7V2|tD$@niiEq- z)zH{>lE2XSvvOy_DBN)!2g7cic}}ggt}}cSdjtpD7ysig$?u2D5gOGG5*Ijz z@Y^^(`UnyM8fPFww$1=MwoT1$zKxomf%pJ(TheTy{66>Ueez>uM=7Mz&BDcXe6uYX zo-={U%;?K+WHzoH$$~mQ_;gy(onbi2&uI0S_a+1Z*oMU4?@028W71hTcje{IHk~* za09wKvj3iXs&d}Z7m=L8rKu=kj3N@ng1M9yo{(grl8#FWEa?j0+yVpiU%#sK zpBV12Sg*5AL5JPlA`@W8_M>yTTKR0^=r1?-ylyPkkLGrq%S-Or3Z<*W7yP=i=8r5N 
z9;n}3If17BUMW?iL*@D*M-d0W;N@HwTGU>!kuH8oL3@(5r4K1z3VEpZ@(0Ws@Z;JQ-x>7sIE)p>x+})420Hps8Z(|{X=1g;^WxD2azmQxM)lQ3ML6Yl@zx8Nk zWBjy7DkrA35rhqKK{Yu7tOsgo=-ovR4V#E}O5}(G=-t^%q5ng*L)CF*(;g`<&~S+{ zRHA>zt!pMd-a}0Mx|HIi|4J0ik}TffaAC@7$}vXhBomo0yuelU{*GdY9B+XXQFkou zn8XNL9aM~HnwU0)N!|Z>nA=pRN>E<&1YhHjI0vG~Hu2oEpRdz3^?RX-8KDdhSu8$I zCMEzhXIv0^Q+!c_LN(6HAv~=yV*ZCO}LD=s`fFbATSg1NlH9tW8J z!#mi8q$0w+@j3+!B>gl+1x_LmS!)K_vU3_b!9|IyQj!Z z2_NPzE_uVVJYEe=-W=?Br)-KQBGX*A#e?*8a51GesS(eL!_Gu>)D&C zv@P+z3xeQ)xhMf!W3!P)aXPEI4~LkGa^vPh-hNjGgUw%k*n6`c^lRZBL@oiIX!<>E z!K!?Zr7&m&qL5kGBu|S+KMXMkr(S`HrM2Y9X8mU(?YVzP69FjAr_3>|9j&J94OGsA zqcf1|a4OMM`?0UPRu*dZx`|07U`SebU69TG5n|b{`~>i$bnh{j$n|`pETtM&_2&qm zb7{XDYDLs~rTOTt^SKlxP%{Bo)NV?73Q3rPnq%p=;-LSku# zC2LDnyXKobZj8=gqtsR8OnM6|%&3;QJ|tPt#0Ci4Xtt;Z;B=-W5{u)JNpX7hb}k9L zl|q{NJB43pqSc*30G?S5;3*JxhK5q=I!}H?+tDT3T=OKC`WrAp@sMhB7uGoF;kq{x zs#tJ^7=_H_B#CNr(&|#GgK;#yfBOka+0^E0zCew+F4E?@Df{WZWvMUFSYQ8c^b#n< z`ugA9z>MsvI}^}>St!|3Z=vMgi+ybN1?t=FU5ekGxD>0hOBrvq9Xlyx^Xtx_`yv5~ z0A@k}gY6hFmPQ2Kj~R@TgpT`r*c?Fzu;g-jO!e5cnoulvrvbQjVzMWGQ7PC^pSF=Y zz2DJ&>Rr<5A{E-znDJmZ+hZ~8Gw4ky`K{u{2;1~6R=dM*0m}oqhk7h$6V7ivcGaXS z3Z?5Ne=TQ-Ums+r_cEo}u*8PuzFd#S_)!?mZx}Fd{>!FTp3g3mm}o8>t!1Y8HjgC! 
zsYkGcCA_Eqcs=3ap8gl5t-si^jr49xv>|ZT z*JT>t(_Za%7j}%O15M}6BtsOuWSJtgI02&@f>D?6N&6J=>9BF+H!o9@g@`ZU!qKq za_WWCX4)_?laFXOIfDKTz@%>2E>_B#m-2w}W2Tc;Fr0BDzy+g{70Kwx4V|(M@D=G(2(;=ugeYMy3L|G;W# zB$~H&b*)!wYSaynw(L+dIof2}^qNwhkY(Xe{XA!jtHbu8Ze3Fgo!(6Hq0Id6Y7^QV z$c?Wuf80$}!L&DOJ*;wPqMq};fYxc}R4j*PG|yfyqMGij2Gz`bj;L0b@2bWtyMy)h zx$IP%!091P7hhA53szAd(35#k~uO*bcYKWB-zWspQL45ZI%hbX+ zbcVpm{n93Rxt|NC%9i~7ZTW*TTH#62U=jj}7Ko?r}u6suf-)!RuWbTz0;dpQY< zkn$0VZvZ8y{DXd6%s&`4%`y+dc47k(=0PrBOSuPH9SZkgGdC{6xChPsEz@uhPe>$E zuDB_PZ5`oVX%g7RV(JN3^f7hJ1U&e~FZMomi)2_aNPn$9spQPVsEpg!{~p7Ba2)X= za2!d_HKYqGiucMh6u8i-U?qzg!M?^0O2Y(|2h+1xGoIrj36A6R*nM$&w2pD|mp3d6>45ouasfIpho`%^=lvYEI?29S5*HdOwF!iGzZmV|3H~eAG+B%K^ zo(m(8PFCnT^!9(H2csAu_>T5tDV5aJg|u1E+%i)7r|f-u>RxC8^L5t*fF!T ztwu?m$Fl$Eqi61@`%*XaD_Oqn8s6U}ap$^5jLP3sn&CYLr2B>LSjbdJVzusS8yrX| z61%DJ&`QmZZkY3Uh#z`ZLyONdc5r5;tDtlL%C3Gup&rfp^uPm-%FXdekx$WVi_t?h zb-bT!_4x2R91-cFBw*&NE6Bvtzm}Jd9sP%EQjj7p!+f{wT4DbNmpRB3SXFW8^C+FE znn^Pprh20)$@ZRAJ%3$)m0c=a!G=j>j>txaF@c1AlArsZC}Ynnt>7t}2^-g81%5gl zZR3D418<5yzB=eDJkl56u^z6l_X5%+MW^X`Xs_DDO_OpHqO9aelFk;yat(6orr=?6 zxa_gIouL8LM%b=AA$2)RLxpZ9 z!IAZ_x|Y;SmkWGlommmJ?}%E@N%C9(hYv~LfvRRsGRVANzQBfK=C6PUP~`85>fuV_ zRH_=6Za>bE_Uk?arH9!WXJ4Hs)1RPs#D=Oj%3zIzvhj|Rx=~8$^`PG$D^*lwot6Ve zwP41zXD3s{yu>J@jG}GQ&lR)dEoj{6_Bm8mrSMie=|}v-bpi|A^f4`E5PY9k0K^!R zbfG6Zg;KfZYBz}eeO&E^Xt%!F7v24FoSs{~>SdR284cSg;=aduu1;90AGRaB+aO}M zZwSx0!emi}KxyJM7aYp6d3!S0y=?c8gii2+4E7ptjCl+S_!az8sBYlhAcE-Mv4zCS z=+4W%R1@H#)V-Hf=MQmRb?rJ0F|jiIKxabo*n9W$xfQNQROgZR%58uXp5#^WFR+3M zYFKo}R8(7X#)Q=9jJ*pi!`$jlK<2z9<0*DV<7I1Ycnq93MJ(1!sqo4I zqSPC>9Cxh?l*i|obwS@+Ia+8%W-UpKc6^?XU%D<>U6<=c9_(y6N>j}o?iUq_AM(90 zVOch!)fgRHujJCYQe|Unk~VwSNXcj3t%4HxcQd}1x!qhfz|PRK_EDmSm;nV|#gK zk^j3vASA6TY%6Rtb8M#*4kXiLx^Iey*9YqgdsaCFy0(Epu{1gaT1gQooVR(}BmqqF zyl@DVP|8X|%*ZCU6lc;^I1_j^fU3e@36)|5X`sxVJ|eW!aPT^7!jKP3qUmM`8HdV4p;1sJ7SO`2Mq=kB0MwXrm4mn zKO)g4h;TK_1;@xK#vwu%QrWY$i7+1C5cJo3&Q7M{5Z(mK)PfD+nfrm%go_X! 
zMb+#f_88J0nC=>#rFK^s+v7WUr?i<0nxfL?N`bk%tkk-2UOj{K@R70RnT(O(96dv( zXmh4sQ30zB$;Dr;21kI~9;yNm#^Uo# z+^!nh?uJoY+MVx69f6>LM#VAI-YX|DYm2~wK|73%JfLxVGS_5W8g;VCQQgnabT)a@ zj8~kGUudeVo`~=2532P;Hfi7U@YNw@?i+R{yCyRB;TIx6lj#`lPM2J1rTL;!v}|=NYwz8q0a6 zM~5<+cFg&XdzbInXB|iBSSYW|dOx~sbLdYD)TtqA27f7)1p3bRs!VHTyF<4fp6_@Bn>qzU;15{67MWa2ky+(=WYth&{=bK8dGnMrn)tq2N26I9#d zR1&lZ@JC}DWj*jX}2jnzSczIQ~iN# zt;O<6?eTPc!4bicCpSCDLe70;qIvVW$FkV)!A`7jF4*1hSbX~t!BN~Aa#XFV5XZhC z0xKq9jnY!BhVz?5rRDkyhgzsY5-5Z|+T7-~!;`({ZX>ov(uBCAY3rGkZ@kYpHp7qY zx$+2^pk+V<)<-;7NvRa;afJ!Xr?Aj5;l7GuDs@u24=P@ibs_qbWWH*;&{lSk}ZAHrI1f{HCJ_ z0G~cG=%8M0M(Y85dmD%;N5nM&+t~0@t%ntApqT{xRO_*fr%dyVY+KM}M>l;NtImgvmSOl1m?%eF^HU&>uEY!-ju_@Mvf29jIG{0%s~FAql4 z1?-UN%&Ti>+)4k5d5Q6?JvwQ?$pWDp+GbIEQ8EG|5pdIq3J{$b9<~4egCrP;HKn6N3$n-UV5UV2(UWQ(&IZ(GniLL^YPFAti1N5 zgPrV6i?D%dWEbK)?xEbDqR)i(&DJ8?BHlqqoOjGLgu_HCj|_IZ6q&ZF;Z+W~*p)}B zhalY3hA)WmB<}NrE#`*eQdKBjR1$f`6U`&P&LhPv>b&NO#5UHY4X(Sxh1*izO-@1P z#s8)`B#%2k^(L&$>qI{ByKS($usclju_%?e-z5c>NNet`I}#la)ekZ2fNPVNG%t*; z{05?UqI{xoQLjO_L_jzMN@O*1&rL&127BXo9}{%D5>st1|ASl&GF0YhX9*yW>}X;M zFlM{Wbd!$Rmh>rC07n>8X4|sF7}>~XiP3x$){Y*Uc)e-nhTS#y_ub8Y_hznwiL$ni z#kkEahT%j}KcRkU(gh@Q4$~LWVhfTgJzxi+F!g+1ny=w5bv-)sNK{oT`bQA!aKT>D ze@Kcwdqmv%VxG)m8ls|Bq=;#m65PY`%aHmQnFoK%D&PbT7=# z1F*@?W2Q5AFC@Nl!iZ}##&cPUnv$kD5u$DpfrN`I9f=)xKdh(7EE4wtjFUXpESWsk z;J<-ky`$0rnJju3lf`SwWF2lG=ajG^~bAm^2N$}{mo*Ha;N`tMsC11VS*sAuFtuoi}ghbj0xUr|49IP%m zp|DuJ#UarCgCWqHK6O86hsy=QS98IkX)J_ zl;k$eC54U;2|6-K?BKxDsVzn{O&s_a)Nu(Db!!4Dd$XXjDJ8jk^VM5La`CS@IE3tV z5JK(V#8|it{(uEq{I=%2z+A^TWsJ;OFNb67Ie)W|Qg?!i-OvGD*C_ zC%Mn?Np?&=_a+ap5K1vZZ4E|GIr>%W^HF%JSTi_7rrb%wQC2bxg!T*`#>>Mw0r8#W9V+ffysubW?6CGY~Ce zjMz2f=nU=+Ujfq517cFrBsN#7a=84GZD0Zc=EN@4-I3iU9iW zO+(r6%2T!?AlOHcqVfxW0yngo%t{i+8wu_9Fw;Fs6%emHH8`Tp{3%+j9A|vtsX_Td zePMdOBr?*nQCj(+kyO3<2GYw0Ng7LKrLS8WOV%Wr#~0vgXj7@K zW$HaC(yE4nX0W=X8oL?O_{~E0y?SVtX2o!XRqAQVG26)Loaow6vdLS)^O5A6;<75~ z{8AckQBcOhFZ48|BsOrqhnj4->~AKK5ka+P_293d`cQ(^ysTj8to~c%^>ebB7=Kfr 
z2@4`Dm>vfeOmv(v63$Nu4N`*+X*-fcm&ngh>BPBxJ|oFUQZ!-2tVM_-tW4^sRD@Yx zx0i@8naSyfdQ?3~G8ml_PrYsy%5+@9cj``hpC-+9u`q8_*4EN-LLC=z7AyMSZ})9} zMgIpH_pOnB%U`#JH2s#zA@h9==k`k%%3YoB_xJ7tN>jh-6+#_~B6r&O+byP2^B>5K ztufdE25^vf9(=O&6~iQTw%P3js;2uz5l_=bQ7jjt^m6BZHT#}30}`)$=2fzdd;{n! zh;p)wgXa&I?g;mqVZbtFqTe-M76}tGbm&dqmcddsZBs(w*3F|8B3q+88a8GMf;k(r zqlz|Z6p>f|5#%~ezQ(O_xz`kz;?aiqXLr3Q{T}{<-SJ_i+!pKOSV9+}E)8-q{2sG2 zv$+}=D=-s(>)i7Cx-yT+7Pm*$aPZYL;{XMrEx?#7XaUAzo;V>9aUXSWd4L&HiO~we+)Rd7d6KncCf0pA{@__=MEcfQLEm9U4q360L&&4q9r!%o=hBxnSVV1y9v1CU zxygI}jZ}w6gnv&6isnu-dNbw8kKDpVgx}rfK})gU{ufuRQCdq2awY<0xUx&EW_FLl z|J&&&4t9m)fUd0?6wKqeu9|dFqBdaL@KL+zNzy)zs$_kpgK&7sG-~GCkvOAfQaL0R zX=0Z_+X=z81FCQ%bl~yYrw6@q!FhUcA`X`hI&2!p`?z`Sm8NxqkIm+@vA&Is6ItlA zv}@4^v};dTvTK&W-w{Wn-FD?b5y`gU9>DJUEGpx862JhY7nWzU*6{u`i-)L$Fqb9# zFch;(c{1$l6r>f4Bd8KP&W?IZtEI_ML!E|yf^ZrQ`=a{Wf5hyM3K!s$C@-F*JOWP{ z&~OJ-Dp7)-sWELm0vaB`!^AU$qt-RC`)b?;N_HALcK~C^+@Dt(t?I#{nnomKgDx)2`F`jH}kuRI)OrqrV+p!pYEmkT4U$*67;H&WC0^9NYa|54w$&doq&_LL5 zgW@@pOm-p~io{%$$&S!V$3HG4{Nq%&^XxQ!dRFFD?%<5{i2anX_Mc6!SR*-%DUyf} zkgL9(Y;O(HHMpvH@QV0}o8Bc|7ASaSZZ+IUzECBZ|z|N^J94u1}iow1p$^EIjq` zMw)M{mF}n(@3_k0(hD6nuZ8mC^w1PNvq+A4*ZHUx23w7Dw{x|l%ygUc zE_--2E^<5-hp7tVi3VnxiHS_-P;k=apJ~{3~H^7>O^Nu>5VFYyi>1)b`0{0r%8+5 zAoSZ0OJ>6J&F(g=r#apU%3Zb}GqBnw&ehkju4AlB2M721NZF)$DQs8PC9aGhXIP_w z#1hGDcR~W>ROd)QqYweZ8u!uI9h~OjmlEb}uZ6WKViV@A(h>;N;NP@0T{z0W<@bVt zx~-Y~E0;n+b6uE~Z5l@F0RW@*GRHbt6r$)f0z-6Z`0bM5aHk%rII^n|Cx!x;HsR280TS|+R#dA4!H)dK+TYSNdS>z z4-^!Kv0N)ySQ=owK*oOeuS!<<+XNMMdyE{|79Hr~FZ3`4h`eJIbp?5lWXZE~;k#k} zH@9@0m3xl4SmKZES282MlUXI0L7DQ!D9FKEfsS(_R~jo+^}~4rkvbCW>zmna3~NC7 z&?;9!++6GpXE}e(HWxr|eGl z+ImSxyH8_s+K}pA39en#lv{yoWZuH(D6`XT9_TaLG4m3qba!~Qi9NH)c5ZZO(xSrF zoYmW5u$k}-S96EmbIcFowxy6Y)6SaR+ng*DFl#nHc;8rVPjqjy^I%hE3e(g%*{;dA$D}`l-Ar8_L#w9W6DDLD%93I( zxO&X>HxZ7+vzAXyA?8JFc#@hT{@evY_p!o>^3K5x8Ip%@j*s+#vjs{mqKGZ6x(`Gj z{bs(h8pIdh{Nr-9r=ef-8M#MS#pnO5vOd1#!l2b0%kdcZ6l~NgMu2>AHN|~n1vo~S zXhnZ#3s=2bs+OO@74IazND(kp5l~b%kL_}ClwJk~ET#MLfF*QOlKcxGtvmya*DkC@ 
zysE;h>nX|x?qsN!BLnWL zfP)-jv=EMAaBUVYU5<4Drm@OAe0P!o-SCY#?WP3i-2=HiTkrB<6Z51Q{Ht^THIX1sv+Xd8U-=`4r zCuKaB_R{5wnFP(d8qUd`5Ue7l+M6`S9{_dCFuJ| zF8oj%Jht=J%ynVjVJx9k(L8{gpkLDZi$W*@-Y;b|$-})%m{|(wNq*E47+c!&dS)Hw zz-dpNbXftM%O-f<7hoYVpc|$!QSO;`gTvF4Aw*3Rtq`tw3|x^hf>J0oZaQIoVyx>N zW$9@ei(kW3qipHkV0FlT6mZ%>; z8y96h!RKzWhUSTU2>+GFn_DKSZ&S!v1CDVvF;G*J$_`fAxJy`RQ`x~8rd+0G2=(!- zvvHQt2Q5yV<%lqxv~ALg!bpbCbMnPvRom;js;}#(A~J`Us>LkyG*x4m-N6(-gDnmd zm+g3-kc)Vi@uG2KfZCwAZTD?T$wrE}r29 zYHLLYX|m8&vlb*N=^|Q`ALqM}e3%}>)|zV4Y^bbeJe~}_n+iM2#G3LtST>2R1S(`& zaFtL_ijQ2;-x>AyueZt8X?A~Cf7P0-I0w?+7L78rZAtBk^fO7JvkE4o=_rt<0D>`^ zxF3-eI;+ITK2bl9m1{LMs7Y901D>etuDPhj!z}2mQx~ceS(1BHvU?Oyzc47)hwX_R zNp1b&Cl323j?l#Ym^=}uUpRttt*R$h{S()_Coc0e388HgLT5?5Qav2HqgTk&WVv2d zyhF=`zSsbj*DF_PoY!c9=Ni0}?J993kJw)8;R|Jsg7`YcPqJ}UX0o%wEAThSE18Q2 zJ5I{IMPIE)F`xh~9rhH5ZSW;}uDYdlTmkjNEFjl7)47U2Q>L-0@IZwCHC|^c$e)f0 zD%+VdJDn-}D&)yIc2qe>4L5|G!k}FV>&pdz^MMr~4ETOZ@NM%MtkO`}+pd#DQC7KM zPjBOFdFE8S;}3AYS@3I{*V-o2?(@7uSmZlysqcNSdXuI?y(ig@9EwRh`ZSddd#XL2 z)>t!)pb1a&iZCris+lgjZ}lY?4s$}A1pV{ep6k|fy)<}^#@}>F@KL?~-6g>@nNjU!!3i(GI3_S^&F-^B zS9j??0aW}#(BiWU*-p2jM!KVRbHNtQET9CVz$=Uf&2+okt9DZ5`0A6xRjq>6x%gu* z3sxUB%eC@0UzTQwME59T^G$tOlg5T z^ku|7MU;V@MP-79|0=p)sc?9-ZkO(v!~4$a%jPUpY8`5X#-bvE1Pq~8&^)bIW5UR{ zHep`t>5OmqI&t1XTN+{II0dgHmoW0IQYlb8f?u>{Hs4EU~jg|#DZC@Gtq48b`)SvWg?nw?1I;r zNUfmB+IPOwoI1_5ZttpLq28Lqkrh0zd=;f6!H!52``St>nGBkcT7>-u(YMwO_6|#d zPdhY7fjhAx*r92&i+m`CZ27tiVO2z2L}4J;;n;|6mm&qiF|}38q$Dw=%5a;_Bs+gM zlaaQ@ObmV0)4A;BIb4J0sf^iFihe40YAV7Xd`&8+bOwynGzHZ|o?x?*%}CTbTn8IZ zUgs9S0Y?^eb(crH4pv$RBaN=CsHA&nC*D_8?>e05SKGZH5-+gmu zr;;g{W%8?}f#TXl!}c?Tit#MZWj)P z%^A}xoa|n`u80e-3eM`8f#w19Ooc>BPWFpm@T%Z~)sCHjOnI7z6_R)HzE^<|%cxfn zqBIG60QEH-W`RhMdBxbM<23GIr>Toz9mwcr*U0E6DS7lXzTidLwYF$ar`H^b$`uGM zWroO+yhc*md{l2tX+t5ex=|RBc9@clq!ajvejV==_;Ve2CdwmV=+4esXb%mw=Z5|O zp>b4b6Y4NY2dR-j4Rk?`M3;LPbul557WZYRS1;_>v*F&SG>+!?uXiYAHb$5WZ{wh&4 zS-J=s-CDRvR45uI=S-@JprNUnSm5qbt;6=jBB+s=dDL*p0<=f6U8HlO`JPfu*9gUe 
zw$L>nsh3O~9;LmKW3boJJgY(dM3248JYdv4U`!8iJhri=LP^JYJrq4}U;L~lO_Z9} z6m|vdFy}EHHHh65Jn?at1)J;R`f8%Fh?`B@#K8MX$wks4jPsiGb7!Opb!*K3-X)DW zx{@g#rs$-`#ve)Q`HRIr&T@3ji?bN%~`XqzPoK*y1=BGcbP7LAazlT1iLDGOD zHfuAncBD{XwqT4ff+{6|i$)nzuq3jAc|<}q*932*F6;xAc+~Hpl*_oOnISspf{7wkqi2O zlmO5v10$3GrZ+%*vKxvtV>dLBc+i#b-*S4juJc$(@s4Wg4ifyeyy>j!b!@`Uovcca z8xFP~fnF2a`SLD!(oAyWO>+V@oQ-nT!=yDk3~1%Q zXyn@WkZVo3N|Z;gmIqj*{nO3k3&|nW-7e~uO67`*LEeU8ml@CGb81yC^3_+k{5}J@ zds-_9rmT_Uq4?|d@SSfr*O>drogw)3nCKdDrNV%a-%rcbc8gSjW@hDSbI2HMP;VT zb5kxbMn4xlm86g`KHKq2dhTn3zdHO&T$tW|3(saFiv0jd0mKiC8qvKHEW6HPz@nHgyEOs z@hgKr%HI(8ye@b~{;hYt^mW0CLVo0~9sehIRi4-Hy`E8cJ>iYPjfHsjs$gwlb}at& zRl)lFt#MyHcquBDzpMu@&fgOMT|Id3z{qv@Np5+hl*<84IUX8*tD~BW+Ek>C4d=UK z^TlwM_2k2E$9o2YS7^M><6Fn$PhTCJ)%J60e}~t_|M%+Plmg+WTdoPVwS9d=-`#c9 zHNgqR{Kw)?UKjjH{^RkQHwUlGkH>>=4t{?4C)zO}dgc;a9Xh3u{lcaK6n`=v9tvKY z|5RLjOK@xc&UoxC!58?n{jI^i{GE3_>21NY^6ul|@jG_~H}l<7-_BX^-F4RopXFop z4)j=^pKb?oF|d6%zH&I&)H3+PkHj|*2hYp@b^M>h!72H>;-}mY{9b+{e&Y?n$@$O3 zcis?e%6~TgryKa~bMcyY+Lsr+GdM55FCKB9ruFjqyE@*b8N54w{_bEy@87f|6e^pr zjCm6rQfGK~JhU62n2bNVJ2<<*^Z#Raa3bwDy*qgF@o9U^T&}eSFtGRFvXC!q<7Toe z_WKv&o$p2;$1p#9ckq(J^i+J_dx96{?}=}IPp~!r#rXdB1Q!>+GZmj42hUk|Z#&oy zj5UV$y9}h`1)+6Rso~$od*k31fbHCoU_<`P@ykboXXo#YKRpt>I)8Wkgc~`UyW?Fq zl8xuy_!~C{SMl|f^Y6V?(U43Msqn^JC6U|^JXoAa8jH>ZP*N3YP zSH5q|L2NyaUcy9x|0Y{bG1?Tbcwex&^*EXw7oYjQ;05a|mj+$=LQAo=t-YhO)P)B! 
z57B)oMzeQ)@_j*Pe)SgKZ@aW6*Oe{Xac%rOhQ;vn@wfH_C$2w_FI70ZE&NIw>_+mu z3;B9F?tg#Ka|D>|jzPKqU?bg{`vWy09r>yG7xIN5?epdSe0<@DgLM_<(a{AcR9nK& z#Dnh-PC9A!D9zWSQw%;U5eb;_=DFdmnuDG?7`~S7JjzOPvj?*>lN>m=KLfuJk4-i z1ihl^jAlWALrRzCn`7_f*I9lI=)<01I1N8nF0Y7x`e6vVHUo=szMpvxUdqg)^Rp?oVP=U)urxW$26Y^zxB6vMNf)0RrBkjCq_r| zQZ20OFEC;beHS$%wY?CMO@&G>%A+lyc3Y7vVsKZVXDY~Z+V6QDgJ*!>u%Uw6?}6OT zf(Q}&HCTWnV#BK3ksMocG?I+G{`d#5>Zt9fP3|W-1fsxpkU6ZCj@V;{`P&wfjvGbM zri%)Se;?Ja=;ZnSZg;60mtai4joEKMVG|@%Yfm(mKyJLfX1)zODxtLH9*hbT1zX;U z3$3VD%rm+d8G;1MS4UEoY;726`ry zY9m24)9R<{iC`QX^;b9ig!i?Jg4&!bNaTtPWXdJpcrq7CY5Jh^MWtaS*~HY-;qij|e6B+dc^ArQ1Ga?Wxrw!v0Pbw?}V`z`-yd29f8lt0jnUCa&)d)?7HN zKC_yOyJGANTgYc6!8hb50GVt@UA%mtiT?TxJ!+ z<+fa^ju1Yt(TY>m%4cciH^AL+y6SE*zxCKn@+ed2&z*#V$2^SPsbkYBkBxpao~c zTt3xm``Oh_%aKg~-R)_iG=Il!LGStaV75fX%PB#ENMn!~>Ph13 z^DXeR0vQj13!3`^D%7^{A&-=`;qIHiiNA4MQ0sjK=}OU6)*unzu#3Mn_w~4CEZF>e zZ~3LLoNl_*^X@S0Lh-a?^P{j#)P$AP1#_5-PCUtqq3|%+jBp>YfbisQNI8D(Snyo5 zytYrrj_NUprlMn1Yk(inG$-iElkD#}f0K=xj4K}w`i|Ns<)G0msDX!`Df*!%v!_H; z^l1y({jwOI4f%5XXCDrp0N?wr4+l4v$2glvMg)RK;w>Kup1NUN-^fPLMv#F7Lu1V* zPzq==@moIKQro|6H_V|!8pzGi zcp&!=?H9zSeKa_$Fz0~gsk0hG%6h=d{_1gE35W?upn4Ene1}Y@0eW-Wc-WwJZP@n9 z6!s}gFcL3(G-L1?5C$TK?{bc;=+k@!EQ)(mYGRx%e|3}`NKvz{|i`r-J zlfKz|g(M_|?&JW{NN5DnFDCsXNt0_2z)>1aG_9jnivZ98!&D9Rj4wv*j*uNF9BTfo@JZ*46jKEARP8L$XR zqFkr=x4O$xk7r%dr9A%w&^mEM`iv!7Oa1Cqw)XPzvh)whJnG^Ce(?cwd&St#a@SN8(i4Xsjftfi?WZiRKzZ9h4l8}*< z6<*t%JMuGq@+yxW%Nm?zmdk(@-ain4map*EAXeVI(wpflkw&Y$F3ug&_0ytU`Ryvt zJAG}IB3OJ$D652#PMIO-gwk^mR_hF#&%;qCvDk4McleAhC6e4U_$be>^7_;7qpJ|F zUzQ73d*#jy`DC@%%ehSM9o@3M^NKWY?-%-o=B_a1RFY<6m3z&vIUqXV%y}eziBic|*EJ{XtHR+*|FP${LcYsDGxjQChC` z`nj7^Wl$@>u<3;R2~^;kYoj46M8nuBhX_v!^vl zkJovBa^91(UqY^4F1Ngdi2Rl;dCBYK?2r#%Lc=igh#Y;%>z-N3c7b(|v$tAe`lJCo<>#_PL-cqH>POoLgE#ze;Gt7Ika6Q_Y7vGkb*L$4{54?n*S@_QT({UYvrwG zScs6Z?P?c%OFD1#&Tx08%772O#yYeq8@+Qo??{chf$AK3K*aIu%!epgW00%#QCOLs zVobbREq{`G%smOannBGv6YAY~7LZCg#PK@h}tR z=~uwNcV+S}?>F9Z+sRPS$#>-7F0b7swRLg@N;Jr=-ORi2D^(5Zm8tf0OfTNB`lTXa 
zn~&-WxWQVBgcEt~%qD8z_^Q`7x4w4tRJO*|65ix>1+`Az%ya5-zv~YDesX3ko$t$nx9&liqibzix6D{J zX)e0+v*ArL> zyxn)mCbkK(-XFZR4$^jfVbfX#X*+|rXpnX+$cYz(Hc=L&vA7gQ(}U()207;kZ!LH$ zCGojO#_4#P2Z^6PYTud#)%>u=zBLWrzJ7MR<}3={KAC9WngnlOZntk-L!g8BDyXF( zGX{$nn;F2Uagg)aDm&%*3ir}*?86uAnDT~_S zWBI(T4${&yV|l!d57N?td4D`eOAVU%GN?2q=-M5`X#`HFO%#naU?XoyK?_@ga`+T0 z61JEXRO$v`dw9CdiVKT^ZNt41d(-R3n!Wu^*6fZ{iQUw!$ep%MUVYQMH2*!A9;ikP zV&1@TC)HcMg6415XHd5t>!^t)9*t! zI3gR>lm2Hj(-@f?rQpDetbNOi4%SaM^>Di|$abo0>N3K-lvQ`8!m&qcz@y8j9YwyYb$`OSuYR0?G+>l+={oosQcv$^8VYf zQSV6FHkkNYX}irE+UbZQH`W~e&ju16h4sU8LynzqH};z1cNq4VMdQY%$=q#TeqMY# z`SgW5*?nVsc(9#p+6E%LBk9|zTcq7~??z{X?k)Srw`^ zOjB(egDu_R6#=IQcX-X4#aWHH;i&pehiwE^GL|_bOs#AoGLidEwp_N}Yo5eT_>L6r z^g1*J|Jkuy7o*_k5@6KK*t0>U6r82xik)8P=&y1bhN{`z%r1Fqr`IJ5M6Unx=}xbs zqF&+qTraRr`oHg80|e&2@8zd~Yv~-^Z7I*c@3q3?ERq7Q)hZUMS~pFVwdND~_IMl+p-hfmG%2jO{uv*t?nBKbzy~BwH&bwHuz6v z{Rix^pxc2DSRC(4VXgPj@vPAvd9n7VtkE7hT#LbEyY&Asm=8FksNC|Q*ReNUXviLA zSoVmUBjDXe730#6RsaCkvquUm%qc)`=Z7HWHYxhZyQ=ZOr&zw184M~zA)=I`{3E!G zv9kOluXoD2neb>k!fNmM7|E^MqNV zv4ykb_K&@ex%X=p7@KemS@v=sODbgb$6mW4AQx{Pxj~i2w~&bEckC%S_Ax8xerd4_ z8RvaDZI^dCu=_c8_Nj7br=4N>FIXXD{4!;QKt#I|VTT!EoLAyDJf$#iW;OVrCF+=` zPhXIKed4w7;=Ffa0vCJ&m%ds){=~aV8L=U|y%ShZ*YAcAV=jJ`5nH<38xoM5Pn8iX z(Xu6bSQ;>3=kLLerVQ9^d%V7hOfX)3+^K2u?H*QY6KS{C+u&5l7kjzj`!z}3=bcgT zNtOb6LX!S1x4f@GOxz{c?(stJnfu`pnY*8H zeJ!KUYgyFd6J^D9s&r#<*|qGxfmJ<(n|88D}+a#M#d{p`V$&QEe2c^AxALP+BpGx&--j(TJE{8tzmL+#ToL`jwH4Inq`jozI zt>O@_P2J3E+OsQzRJr$auW83E-^CNL~xDAC!xwXBM?o{RpZtbBLEpG&)rX88`T7pqi4sS)kV!^0s+V)+xF&MQEp0l}N(zFw6 zf?P0ZE^phIA!>$6(=NR~#ts-JO>=hq@g@5Tm!=gS4&KGX?gO+ z7v4nWf&TTScMs3Xtgm2=oX@1>FjV?!x%4n=aG^{%3^`mVPwC?c*?Jgl%tO-RYefHt zrQg?-m@QX)?G-z7rSxmm7@x`Cj(Dy5n5XiTA|m%GBjqY1g{E?2ig_|mT`)q6BH=~2 z&y3Y~$YTBb0{P1~UeAn2Y__B|=F!ThzVXg;oX6zHe|tB19Nu=3GxblJd0fhlc(*t+ zE5AMBUFbNE%emkEG|POc`Odqg)hyear1{UU)Yg;MSYHSKmBpF&e1Uw#i17EAF_9*bnyQLk~<T)!@KR-=oI({4d@`B@EQ=hNI^)HC$ zJ6)fzxc*=3Q)6L&B5JW3Q`EK5KJ^g+ifPaQc*|t7Rs1CX5$AJRo>O#k(cYH{jOXeu 
z5a*g9DUcP{92O_pzsP+eO@7y?s964)?4QdRUu{%0Qo5!1joe*Xa#n6p8+XDpGAg&| za(N}iFLb|7mEE~TC+AhCKy#{9ugtT8XOcN6o%4#?QN<;BMIEFp)o@oE0ql-!c$ch~mP)BSF#s{hTT)>G1b zt@ZkJzaJGYNca0Yhh$GW5f<;2vkvbFW;L;Wu-4j+fgr{HD%6 zc`(B-PO1HtUHzb}&EV=w+Nv!m>fE9pNbxGLk5NT9hOr7jb;NbKC&@vc{bdW^x&t<^R@w=ZMV9@_V`; z9Fd(_esgECe4phHVZ^a)|9t15yk2CXx+>d_|63kEAF&gk(|d*B2+iE5E95Vp|9fY= zyyp2Wk|t!DEppKFpKE--9$7%CgmTF8DlDbgmKWrC-=EJ;d#&;R%%jE-Sm+|hEtL^> zG;b5Nq5!w6b%~jgUO1!|hQu5z1XzY=NO4$pYr$PIEXO}HidzNBWm`KJc_N*m`?@a; zl@622m(k>AhzSUtdzYSKD8A@i{eYvfHyMDp{&En7{lT$}F?r~Iq={=i`yRkFdetgZtIaMxF8rA9kw zLf><84uN5!mP-3ssdV$XT-MljdvjyvYS&sh*w`;%xuzB*zUF2g(OT5Z{mP4#vkLr^ zlarU_XTIS`ar2ZGm4}=8!<_u%y{*g^BBYr&D=%%~zv!p}IpR-E#XlMQQH8`-p$6X^ z@rR_`_X6iK8p+zO<}?M|GcYuvz}2JGkd6?sm9u*?hVSH_p0>ROJ^j9&@L`SbCJ3e0`$J9Y z%k2PnXn#2r60xGi_OO{Nje7a53fFN|C{RVD1)?S*FXQ<^&LNc<9MuclS|$(m@`uus zPkRCWb&}bem0Bgg?d@L?hu=b%>UCu;#wkbPx3Rb1;*0=(s}gCr2$2@RZ(ZW!2K(_x z3)D^VkGPb%h~r^(S+>CK+{Xg<{62QDf9d1@2hH#3!_-Q}D zx2?_2oA$}hn?gc1_oc`&N$Y2eoZZjAl#-A2v-Ru(P7XzoX0_jvD$N+Smt8w3&$qd#L?OEMt^pPm6AQczpn9fb--3q zqly$N_)hK~0IF`1#RDu>emQ_`e6{o%=>HSr_>zJCbtf%Jg97$tov9SnG&3utJ6Y_N zkeQU9c>y~vj*w<#G_2B}N_1HFW#}LasyQdaqun*gFL1|Y%Oisr_H**dApbR7Etkfa z;1#kh=3h?#x)1iR@W4jPCAYIq%rfU{zlHq%556y(2K(0nscxrm3ZzQ@dWzox*w93i zaAW9?ZDiXi{-4}2*)r%^apxm9HI#_{HuW*>c_x zzZYL841sAKn=Lzrz|K`kyVGdpm<&J7qTDm5`BBaL)@i_TjpUy0U!r;ca=Oj?-0AEj z$0XwnOGqv}!#{@#9ytR-xJte}!~a|3LmGrN#oU=~xeonHfbH)2eveDRj(|s3`C8>CRbM-?A43NJ z3utOyGz|a9$n~(OtE`n{p0z2>l-fcOlFUXM^ocx$xT-j)d!mkcQKnFR?kl)4uziBD z>fzzUGR!)8_k6!svsY~$-KpLBnRl^{N=~djD{Yf>y1;Lmvl^*VWv5hIun<(a>;lWX z-AXE&iRl;k-JKUIU%kK|?*O6ST(ORg@FyV-3RG$8`FJA+jh*Rw74ffjj~MQX+(2*Mn1cU_Et*tw|2dq{#&M?N^bux z{PG5=(MP33FZO?t!c;%&RFzaN>rLbXlWy{l- zGSYxVpue&3Og0wxmwVuuhlq#5!(Uy)htIsN3FY|{a$g2}| zF|k8dEXi0YlP}UeNL0L*Mb$(B>E?OCZ@?Xe*#`uIR4~s zai`^p_m7q>WXRwBwtV}`-);V@Z}MAIM*mDRe48`^+@2;oM$qMHl0DMz&}7b%=?0M? 
zKcCC3&nksC+C6Ks*aBdPi=4m0{wI=`)ehp9-5pF~mi zlNG-${@LhfRKWRNd4vDKFMYhbGWSNvnRAcC{^pne`ViOs zdQ!f;@^=X1J@WP6*~h2J@SCC9(`3ob7ChhG?2m*=y7nLbutv+F1JELLuz}-^Szfu} zAAVs{ykm_;H6hb)3lt%3Z}T5nZK%F2B~rRI8`HVU=~S@m9Ljcqx~yVc5!EKte0F} z(KJ_Dj`Op{AHx=OSo)6f&n}pWr=dD9AOLC+7I{2kg@vS$Ky(xUtP;j}@x&$Hq%V zMwO(okj6)(_gLHDtz-R;sgGtyQi&oV1IMzJ&y>Am{Tn#BbLpM_qLhh`GnkyZ!L-LX zdgeIzh?(;EIKPwo@e+A^oWGFKW!>XnrUMx7Ux_xZ{{*morra~Zzaj5&YPXyju^2fn z5{_n}PRqHAJ#wb};V%EQlsQXjqd?x6Zdod? zPO2-**FW9k_jT`BcKp}ML6g}%S}w?rg^>JGOF~C+d&fgZUx=)f6w4Utoyx5wQ~Z2+ z;$Bw##(Tl?BAY*_E`NRAm~@%qcb7j-@vrIfM0TWcw&M;8`fBPYULB`x|Lw#Vn1}5M zCdtkR{Ii1Tx92JCSXG0fn8frCdeCp2RT$0E)|y2dn>q5Y`~33*Ot2vyD4>{-6=@76 zY<$okAkq8%tCJqUVkYO$@XyYxM=d?puS~vqQg+`DNu4R99l%AuvWH0+gW>b8GTvM+ep*c!lzK*7n()V zTP(KRC4RF8$Uh^I)xsbhFGuf?x7(DMV^w}2T*XJwtc`hKj(qtny>y@^V=Tqhcy7c9S=-+1K<~@&dD}k zIne?O%mD5M$8{(-=+NYpb&!x)cSZVsapVPnU(fXSD9L+&mVc%`oikHf$+aDf_C8xk}v@~um?3)k}ro(MuVbVWY>L_vh zY`?cV`A&Imwtq$Ym1Pub)1X)%irGyhD27S{XJ#!na1O;jF2l$|vG2zGq*(97CKD7Z z)MCvjHhnIo9=SvM&ZSiOos^0+Xsj2dB1(jVQV}iHS{qxd#g^VFN9Xz%w|`cP4QNoT zC&f_xB#O1uVjZ>EZI4jwn>(fQ5n6jpA=0ctu^trbQeUi#7VECX`aepsbz|k0M=3T* zA=05iv6CrgcZ#5s^tqkePmBGi#hxpZ-t#CnK_ODqpjdZ`4H{Y3Nf@AZ?x|XA%{+=d zHb%aqScik-C>Cu{tQ*DjoLORUXK1;hT5j}Xl$$zURy~GbHC>#?{SvowoOF5I?^;l+ z)nqiN<|H^|%G~leWj-4xOCP7LJ;%`VHw1rI+6oLW!O$+zwl3FledbecVwwDXKIPuj zp|xsIIgpE=F%8OHrR9cexuaU{$QbEaLAiN%P_BK0a-AusN7)i%yH3kpujQVvpj^!z z@@)l}Jz<%2SfK4{`F0J;mr(vtgYq|M`J1%-GYcsH!yR&90p;J@F3q3dIr~XD_X+>v z4xi|ZHf>N(2kOCuUe}LXw4U3vo;RMLo;yZM#*_XPC3lXd9KoRK*Hn87VmGfVSY|aW zV=btsK1tCxM#~!%b&uU4ofi5{asv2ZJ*R<5Hrpi&$@b`2xqqR5MaTIH*%KNx(3S?2 zB}hQ1+?JebOEz0X$(k{8@ghoY*1nz8pyUaZoYA1<3|n%REm^B2$BmJuPf_xSuBH|Z zO17cooCYQ5*pl;X$$3vv@_}*k0VUm+$I5`G>jqU}vn{mQu6vqnUyhYWpN1g4FI%2Q zrGDQANnXq>ys%z+=(A$I{7Ikpt(O`4{C2%;Ti7x_ZS(tlx#13C!_SWR_5yK>gQ#PzTPY0WxS65SWaF>p;ceYwffvJMT93h@%774 zPE6k~#n1Zf)JS^vv-IqV{W9)ZvfVjaDxdX7cYFq$Gh%8z0;!^_nz1FvDPFZ5+d4b8 z;miHr1<(GxL`V^H8HwTN@P;_F+`op^e_^G6Qo-E2n9AY?)vJ6)=7W`ffh?=^qfK7Y 
z9O2_~sCqWY)7yRZF6mH3f6E_}-|6$qc``|#@63|b`kXyQKG)}gEz^5AoDI@4t@iA`qylm;OQ2L0)>f{|7HJlS@nj70a@ak6p+jLmc7@eyc9 z$J9aFj;X~;zqj*pU_m#MzxdG^G9f(5mG>BJ-_topHXmovOD_{mm5 z2G9OL3Jex;7l^TI6$Lh+0di3?%506oLspF^)Qq<9b!o?U*!~_o%F2>@I4AJ~Wn5oJ zeelTLQ7xKr>y>r0Q$ra^rMMXe@u>U#tR3nbs-^pcPNg)$1pU#Q_aGpN%7*FT!Co9? zCdjQ~e^9AuMOV$E2Fgvhdld-T44IKWk7I1c&8j=>7J8WEp zg-*Tl#Fo^$V+-MKsDLxVVIsoo!1J`x?O>#NJ#+e(xp|)L1Ap*GGS7th)VQ=cK}oea zf~9WKzH<}N27O0}2bUR(JyFaM+qoAKo>Y^L)Q^T`9Ul!E*5J6O6=D*jIl^elY@A|M zsZv&b$B07)c)3{2?djsB&ew8R<-`wNHz|rl$W=&-XG!RU8#PBnbHqY`NWZmidSk3CDMoioLT&Unw-o5_ zR!zxV5{kw$wL0Cnm6a*riP`CZTB1=O^g>U-mq8;d=U4kLhV$=CPcV^sM4*=*P{6OU zbRDwc9$B%@FY@=+<#5PR!FT54xiAlb0~>xG#PpzgzF0?C$wQ z?pyD7be2}GS?}NKxGUF6-;I2KV{PS)8-2%hS1zyo`>XzRr*xw0I>AIo8fd=VV56jO zW(}$=1u)(u9N$)uqZTYZoz>zjDn?KXbRaF_7p^d&eSvQ%vrj0C@hA1vqprFpRn@Mb z?;BF-nrcwy^>rGT1}{V|!P&J>s0FW1%-r19DdZRE#jiYQ6gPh0{H)U+obPsG={JK> zeL~G`fiid0kUpVi_IE~*(A2(U-i(E@51~RrvCv?Jrh9X2;HX98=JWF?Dxsqp&S}iP zq4tRU$7_Bh<69@N1Js0M#-yA!vg$QIy+@M0>48p%m^(XmH}*zIVG!{1I)}v4Gt>ODIOfupbk#40UAF$k;o9Ocez>zh!x6a z)7{2G157$DO)d#Z_Ex_j`#UZ=w;L6j-Kcck>bGqYG@z<~)oDfTEf|BMnYadz+KLw7 z8Ckv6Z{qBhZM-$A%WTKa;mT*E`&-y+H_2sh`OO;FUxR72-}VT)bg#*ZxBTWQdeonMJKrKm_Ly{edZ8(hj&I{Rgs)!G2Obv3T;|`OSCu#4(LQR!6@A?B8%~=cu(50Bga_5%j zipBExcl|2AitiTnl`C273k$?K}qcOt^QnO;?RUM0FCfQ7t0U!I9nVQg< zz)8dqDSMt;i)d#~@{v)4sPfKVm(7^We4TNHWRTS zX3R2~vxg=4iM+hWKe^~g&d+RWl(06n{Js7;X?vc+X(^dPBlg-t<$KY^y($%Zaep`> zZ}8?~e%rejv4LlkeV8&|l;7|33kOF}1F0FxjW+_TH0DTmr?l8oN53<)8m%u^Av5xPQrK{(!3% zHb6Sv0%P;|5Z~tDu6nqr5v)i3kp|zbdwki$_2?QKOUd~1h-T@M4nt<`3i;+U|Mfl% zRtN~4SVo1cTG&4GW46Vz*eZ4h-8pp&3fnEni2V3DxG__@97J>fkX&%kk9t84-NDap zioS5nLH}D0>E3n-kFy&2@DP-2v^4z!efwOw`3scOugjt@n2n{CTfV@J1WEY3ulTw| z#(jmZ=)uZYzXBtiNpkS8e@E-ldI^Hw%?Y}y0_=&mo_On4{n@W+Bm=S*YtzG(&%gGY z$&#=AGCJ1n8#MAYa>+NW$vKsWzw!4w^t1K|yT3>m?cfL_m(dh5}){PhD zXAX26Wt^B%HN&$<9wSy?SyGAY{oe0;&a-SO32Sw-TBm}ggFIrmpPQ)Gq_(!UY<8ZA z4eAE#9NWZtO>9<3Fnf`%Gt8rM!%-kNPwqSFM*+yQN5RBLWXDm)yk`0jP|MZQ`3G## 
zkIQ*K_+5LiQrv)c(Zo$O!OCvu-T+lVQn5NA;?xG=vg%tvD8mg%zW4#4y!C^hPh_PH@(!g zfwj_Pc8ci(%-&5gmltc)CR4*H8Fd4-wx!^vR(_*&&19RsIMwtjmB+?twn#Pyz((l~ zkQ!SEb`y{m-MZdVR-tw?4i(SSC(CROxBx!jwn$b4R8wiWCTqd4`C4Bw)^N?1_6#Zg*8Sd;NX%v%S#3p6*5^++Og8f*WvluhZuIUjjNRa zzD!FutxLz#1@0EWBgwXGIWmpnOgt0^eTCkpcpxKaWP73!t*L$*gULC1+~ge5;W9ag z^$9!=HJqG7b(52xso}8p#)lQ=s2EtGpK9wq=_rFf?mDiI3LIjx+CG}k;yv7JduZ(@ z)@w=`;QC6fmzDigU+I?O-}4>67W0 zpu4?ZuA$hZzbAz8BDE&KSy*^1JgnvfM+#;TJrG~Dh&w_C=I6H%`0t|9m3Dy<)SXdl zdvs!ShK_q4xs`v3+mHz@lz__--EFBJXvIt1YfCJo1WtdnR6%@F&0l~fDBjy;`UJ{E zwo0`zrgYUT4wqVG56vAuuFKk*R&)qm(Ge|Ch9*i^4<_3{*~5A%Lo5#woygU$1XDIe zT^>tMquYSP7E#b3Cz_pDS-GP#siB&SR!RZ)Buur8GvT#DvlHm3)=VvGU(|+ynwe)2 zV%BkWXeJ9>zJfyx*<}pifwkI{5Y&%c2WYJF7aPGjW#(-9B99m3hKz`Ri>Jy+Uh- zX3$m1RymXC^h(ZLn3isNs(g{0lRtC<7-NHCf!3T*g@G_zbXgEBt0WZCu38Thq9JL< zs#NUlhx8YLiel{(_p#{wwhvdydA`a2;}K2PKwYdUYBci=mvMA<7N=O>F!EKT6dMxR zPOG(6!z~;7{Aum?JUm(rc-*?FgU~RWJgZZSGssyZ8-3G~)3^J4)85-kw2k00O>)!- zw>uZ$HQLLV;*J|q5+R$>cx#s!S9~Y8oFAM*D6!0igW)Iq8emYj{MP}~!UD#U?f(*B zTm|!g88C}qhp%c;4;ZG)0!9(OMkeG~z*OXz_CA1NV~9Ye6fm3s?U$3AOheTva$-hK zB)1+qs;IWmk#qmrs6YlcGWlmz*Ws8|hY?&RWMCwj%XJO#%*JYmzSL#b&Y*MFZP|{B zP&|$3((|&W5xauowjxcuvomELH>D$0tJ-^0mZD9c`fb%@)P*dQoo zuixfwZ?Y4-Dw<}#Pe!-GYSFkK z7vBKNQ|XJ%I3ZOvOC?A{&@f*ug6TvisCEcqeN zq?Ib^hzBs-s|A%nFfsP8hCa(M<1>|DL#kEbG(qolPK~W21=X5D0=i$Vb?~}MpR9W& z9yaz`f*wMo%u*-}e5>|9tX)&|hvaLMG9lk|FZ{KB%i;o4U?HYPkvZB1h%iIjs7TeQ zn(Pj?yBY~E!%Wup+HnbmQ6jEI4uB8E< z1tKs>(_M$OnxHpmBJ9}WI=1v^T5ym;u|($t9EocxEV|e@b?ki>P1aL2JH2gBmD`|y z%vEhv&|t&a-X8CsZJA!xYOk1^Qrj(J(%D`iiq>JpUXFH>M`w;`8ysm2RL%^QYi7Wl z!J*?9Y#*FQE#b>(Nh;LA|7Fue7JLxCc^W(_&-2 zyn|ejHP*aE`-8x@efWrit65VGN*BikEQ$?DQLXGLFePqTnlvsnU*PSV*2LV}WYH!h z`Bw2;w;%veAuDxN$;>9E$lbzWpeE*M@nh-5s*jI3Jdl8KML!v9MR*Y19i*N%k5z6h zGABCj^7)lcGh}A>{;#D&bF&sT+=1q1$-v2;n_QZ#BJZ5=$Y?qyVx5nu&aJ(cllvjj zVJg`%R?}ac!Sn{>APQintZkf=o9hlr{HHV|JA)A7S6!2r$8F;2Hfl!0%C;jJ)-o$r zgi5zkQ-*ntc1l8rxcHF-YaOL|^zP~QY)LB1o#Zl?+n_LuZ~(=SEg>x_;YREsD`Jn$ 
z8XUukq?g*pDOrwz5j>zQ7Sk{iP`YKHbsSuhXc@5#^J9wMyiPUqw3;yAk&hRWS3xj` zEsGe!=_)ZL$7(BPm`vn}Cv!CavUszaZ!!6**kdEvV0|ab!bntWxoWnUdit0i+rYa* zp$bMD`bgI{NXueYIVTMgxrvC9qua>oi*AcH8U}H*fHwVieQZrL`xI-H&la!?@WT+p%ZH!9bVH6GOFfXmilR zpeMGMFn<|lp&f9I4p{Gp)Ltt<6skYD$c~6^d<9;F<}3Vkj0| zD%0xu8cOM$KA7U(KA8SMb+l28nQXE>+@m^F_6v@xf-Q&KV#NU!isMC(P;_EOP!J!P zpkUY7L!cr8u|>r)CK92TYZI(c4Ekzus!@UX9y=*BWLwm9E6uXuOS#`c)kukZ^Xx7e z|FSZjZdrFjivr29&vwGabv8$szs^p(I?WD>9Ysr>)JV^87s{x~oH8nG>I&+j_5YYM zpIv30a#&>S)w2`!UMO%Yb~#l$ zafq^z(O^9;HQBlOO<8zUVZ($`0`sh1L1ibcMy_pR+T%N1-p2gEq58=umd_TdTU z+QuIdb4?AO@u2_ObW*IJT+!CFZt;;9tq+u-_n+W*fV;~`y}|@PiP>$<-}J_ecINcP z4Xgs({8!NS&4pLFSHqT1nj zIaDD(@9+&B%~|fI4YH~u6>O6H64NW~$;T1>veB(yRAT;h#TpKM$CAuay%IdJ7-IY0 zzwhyZ5zz8Wfh(mnbz6#AlIhOrlY%d zzkJ85JKmF%x|puiHC!KSWO^6VlT5F7A=5qkE010mpkgix1gJ8nflTM z>{}D7@{N59+7O>SmHAmJX+unhMh)zk5NT@@Jaq`G zXrA0S#B`+4>>=h-j;(z=#I$wuu9xU(W*S%7S9ZCCwasa;tTW7AP3Z#+o_RMdvk?m- zmVp;l{L_1ti_b7wP7W+uMoB1AoXr;OS)0deXPUy?aaeDy2VI8sKZh3^BS+3OT`)|y zKg*n&q+Asz+Rfl0^?XM*oXK?%93}P$u}a3xcv}yMf|aFD=%z@93NSbD27PY8aLJLg zIKqVsL)WuSo>L~Loedf)FiU zcVFjn&^+8HbOR5Bo~w8elj8Eep;5Bq98=h3xNe%37PBW&o}h{hOR%iif$O#vxDNs4 zr1s%kr1iO`V^{X3B)d0p2v|3O1_fpuU!dF?B^*Jf$!)WU#+2;Ov2HqR!VKLS$;$PT zs@5U!!G)81ePMo!bL;$W=mueRm=3d9A%akVuMl3RI*exZJ6e_%TGaX*bro1xw~|Ui zg-XQmsZiwZmRscsv<^lO=5z|mt1gpO8)S8z;*xY!40WpQS1(tQWG3A_0A0@93W6_q& zSHsK+xp7NU5mZ|d4R&TvWykZ)D~|ioJ~?uMIoaL6PdfjG#XSB~8TLp{UgbZ2W0pE@ z^*-r%kvTc#&3*9TS@L{dPA9i^pM0K|(@d6LWWHw6F8D1N{iwY6ThpZVAyqlyb`{a- zhF`VL6`0R~r{QV@;9xdO;l-wXr!PE~p=Pk>1@lD`__YUqSz1ZC7d|TQ167w6!{nnH0GfZiEb&8X$WojhrGS<;N zx$H92iIX{ZU1mBEx+=c%({uw&)9GO`Yd~)e360ZJhIuv{trYk7n&sK3bo5KDUbCEr z)p~?_|KRhWk5l;q>R*Z43P~0_b{tY|k^m&Q*j!?CjiTFYZLD zN)#vw-2w)s#qjgR;SC?89_}oj2$ZN`^aIh#F?G^f`|u3|U;_EH0kqO?unyI`mufd1 z$M>;x$&acFwyj1BsC1-H7_&gRPi2Aal4-X=CrP##S zEI2Db#Z+Cv*%t1hNGaM9`wg0&6W_8D0tO2}KNUB!0teT7TR|g|C~BpPNFxh{NN&$C z3kW&S;yQ9G*tQF{odbJLG@G_j8tS4;MnW$My`uP>qMF_IxIgs8(I5itKV>Q zaJ|cjVhobFo{hAc;yl@ZC1C|8(@1CMT)WGlSLo~L2mLfOk_BDTs?@r|(9f{aB4bgx 
zO$jX7tWcSu*AdvSTz;bdHCbHklI@sk&aS{i=qT8D+2Jw>&^I_d&QiXE;R`gSRGlK| z4^zY%#Pt`RH6<3l`Nr#Rf>DI4nV=0>jw2(eM@bdk+j^X+7xV@dF<-grf_egMhYXdg zw9f)Rs^cP&#mtpKeL{!y-6Nq^KUAnZ3mL1yI$k^{U`?u>w#4}hrfq|(x1~}s^Z?b6YM!_YOVOVN&R=$NE(@@;zT;=z zQ5vgi4m&tX+QDfJP_IM!PItE>neWx#$xyFaO;kg^9Vq>V*bZf+YR$JjOB@KdMdzrg zTB~172@9fdoO;pQ#o_o!?HChUZR-!(4%kp4AuvO$1M;Qt(Na2{)nR;?WoT_)^RzrY zJmL~>XH;!ou5|r_S?eBJAcy~8I=kO3kT%ztk5b&{te>9^Vjv0-=7NPIK`KzxrE)I>)Ux$GBv3z-*xu0MUxVr;1%XJwD72<3=-vPxWo4X0>cbC9_j>Uo&Ij@QN5f34N(@|DmGy@Rl1T68;TNNJ?~PKdt`HhfH%N>s=mBo)u?44 zxtC~G-P7>ly^2CA)$5)P^{?9_uTvx=duFU{P_uF%arHQcuu=#cUAzIsT@fXfxp!h4 z5P`Ji*R1v@`orK@B90u(7QDs^wc#FYPpM2=2j?WkxyixfrZQ4e~wxeNJrU3+v z0NX#$h;$EW|0h`PAF9Xv^@D@s(H)x!0MU4ZaQ%%`ucNcbh8bxjk(*7stem*lWO~!$ zbh+?m6D_r~&wgx~>;QVKXif)dQ8SmGpt3)X=0VKUV)GK)Y?^w{C6Iu@mBTEt-&mGW zIWR?q<@&U^uta}ZWMGrg*wr~|>q@h`Ou!1F$GX|%msU|twPp$SA*;kxeIvbr0w-Je zCM$fkHzlDO1*DzU4NlMSb}a}+wG*p^Ai@09TA0}Z?6t|tbXD`KA$S$KrQyKi3&#tK zgeoa2r3>I~Jl6SFW@hup)m4gT)roU?|#{;zzlQ z3l8>=w3s>&9gorM4nHYT5~^^Apf9(hp93{#n*g$u)HTgE!QUAWT_Pp|yz1ke&g}4D z`m7Xa3&l~mgk1w`xblqFaS#0o>~B1yoeXpjj(MFXI&(iu`7Ng1;6pT}XVMs8KoWwx z(AWl4Q3Th)Qn9^3U$0~urwBIq-1*UPtYgf@7LqHYo^L`tntz&%VHNP>6sQQ}HB!ZI zrR}kH);S`TQs6(~IP$oy`q(i)Skf>q>`IK4O#p1>Z=U*((7!ow2{L6+UeWawn-R7> zp^JEZr764fi%l(g936W*^^{;ja2+Er`deJcSjVubEYEOap!neF2kZlJX&(5iMbWG> z?WFOo#xKD75y;u9iU(|WTRh$+{cinl0b85@KCr#H8{KM~|8uat@Smpje*;3b{_lg# z-mlRwxBfqd%msBf4F@|5{8}8f>?%KwkSVQX8SC|CDJ;rj)c~Sw+(C)|x=}2a^wDN$$rLMT#muOt#Tjh3u?P>e$JH=G3>A$)u_eZ8ON^$=zy(Q# zVix6P{+Z1H;>8&;bv%GAaYuSd$w`iKmoO*l3~hyZ42M?y=C6{xOtS{Y0OuwmMk7SN5e=(nS#~`M zwIMpA=-fj&4*E3}g6!tX-zqmju!6nb?hSES*X|t^>>cRSbmtCh_kymee_T1IQxQKc zOAp6{={nuqYPXkiML&brxlM-qw*UgqZ$*(*pJ3quB#7>IYs}taf#SrCdIA0ZE&EJb zB-`5H106&FJTFeav1FH~YaUfZV2ZNJC4>rFra@1!*p};8o@_cnNAG_Yf|Q=%vr%00 zolYBKU$8Ha!pAs1PMi}Hy`QaRSgMH}=)z8JScu)$vK>~pq=j@Yf@@50aLKa8bQ0bF zcP23#`~+s|wQo^pgByU*YHKoyKy9U6%V*||h%@H;P!%4h_S!I`>dZNdDi!rqb(GTGyJGqiv5{-9UnBuY*pgQ zQ1+r*s2i$dZvC%L|9@rnK~dXYU59?spKbdQ+xsUV8|2l#y%jgC+b=)q;`9TQaT=8Q 
z$!s9PX*>qY0|&am9BxzB^;{A!BT%I#>e$svV9(!GLP@fu=GVy!Wx>d_2wOdqYyhheG#ZogK||3WP=Hvdbt6#r6RlsQt{g@PhH{cckMRF?dpxfdsoX%Cu{Qhy{!#JbSwviU(2Ym?>62hEMz!)s@n@%lVC z)7+TWE|LQmm^)uG#B`P!vrI{%`d4R}D;sahtalJWur7CsQ*q0Yfe)GQ+u}q{Bn2*n zvM$juz-iHJR77V&_!9=u>=Ch=%BvqXNn!Zd%0JCDPq+mdc|O(*vD=Kp(c8vYanV{y zyLpVTi<~{r+(bB^>UpT$c>c%ZW~QDV&8RT9rPZD5oLpfo`>QIrU3IR!Utx;UbVk@( z_fKD7uH?Yt?-!V{DR0!488cS)EdbmzD~CQ|ra2sD{^&__SIX4MXp=sXQ47r<(q2lq zw7j>_TzXYQmzF_sm#x^YIZSBWd8JMZcATS&k_3xBv5&c~Czchsw5VPSH&{Ho(bPg) z)T?FcBJ0(%VG&*}%RiB?7J-{TNY|%KH}`>;<=UsL+HuxXc(r_4D;s&m*=73E){CX* z(>SOQALWlvo7Q>wx?t#04W6ojwes56Hmb$xCUbh&yrQAQ0 z!8DVrmzw{O^d+bt@2+gU1jKX}S6;rS6@6{EjVFh=5o{7IbU8~Zq~SOOpr;HW~P6>QWBtV>)IWa9#wep$6dg%-lWEo zl-Q{@P9Hd+tHPYS57n@tA(~qB37rwV4FW^+&~!$^otzZZ74+fyV$>eivl0b@+8C1S z)SY^_BATfUUqEqMn~xBs>>UO6EQ4&XGTva~M3@R2cBx%)V?8$9QRO+F!-CffGmG;g zxt189jLEpUIG;f#-)ud!ZRm69@VwAu=Td46MwP&jfX*;PAXzEG?*nI!B#APSpq>&V0rJ?cBs_^#-NR=C>T`u-# zU8@nNBg&P~8I}$~1k^E_qZam0kk!29>FV2so>vNt$#6sy-#LjoPd*+M&A!}PRsx4Jrpgh|FxV>CO)bwiYY zpa)8I6On6TFPhxapi$jsbq+Wpry8OHvlJCrXXm|^8tgym45aP=7-ndNZCTtHq8IOC`TZ8aTk})~6O7}Xf(K#I?jUjZY4il@ zo@S1w5qLv2DaYpwtVwO+nH~hJ){69#`YgxRTxPDr4uynb`>%lk!327Ev${Me+r&AN z`1XaGPRH09bC>nwG!Z|ZQiypl3{W(=S+t1`%kpma@K>0jtFne~8sI@sFabzT6{$Uu z5O(0D^n1xOCAw(Z;oUFXLcXOn>oOS10L~-)UbngsKiyY9+k8=Pc>RR=b$yD zz~Xp;U@ohET0j|!;wGAZnM0>Df@xdqM6-%pWA-jii~D$xuVW+$^_u2aAGTonn#%r{ zOdBUGIWL>8rA-1&Bo|J5t`h=igoIgxyplsQ2ma_Fa1M8EbDwG!0e7k*qg6~`wfnSRD6hH;r*0avAG)hv#YJt>+W z^e!jSajL276==r=!ffRKUT;olnHR&T<=z}cs3-JxAdNLZF6p-cnIlhrzX8XA0{f6z z8;~h-<@pV!ar5Ru&`DL8j8%e!Uegk$G87W`$QK(-u2Mb-AOW0-vfxS1MpGDW#*dC0 zO=s>X7!+v)oD{!Y48$WgniEnQ@y72iub39bQ_kI0J-R9=D0U@ZF)OoZ8bt~f(1lq} zGLZ49DXXrgte~bWttspOp{8tW&d-YAw>h%dm%CmuZBv3RM3%o|I-~>}gY4#e7_u$q zRa2Cn#;$j2CjaEfYn(S2BZaS<)46Wrs@Khpsdogjyp%4_8JP80z|e9n#mk>pwtfRY zZ@1#0?5aUvdEsDX)6HhS)teXrc@k6MN`OC>?Su8<8_j%vSJU6m%nV`mexnv&UsRjJ~e}L?rx-Vif->gwgqzW zrzTo>3_mW+QYnb#>Sj`tG6FxcOm3%`@~Qbx98I$KL)^y5`8!Pqbs+!!esf_nZ9(BV 
z2SKTV)BkLN_DHRT_Z#~;Z}Ny7*l*?1LGL3$E|g&hz>P(6!vX4CAO{Wri85*XnJJcQ zKOlzCt8(#YrgvVV-tu&%GBfl-@j{vRnHiK3jOh@bjPf}-8B@E@O^*g++MBLJUm$<| z+(g}D>GDi10_;?I>2ouX&Lkf+y>QVPaL}9&(8~|9WOquX4R?0@ z67KBy{}b+X{%=Jad@2-eb|~8Xv-GQ$Zd%-(KciucXtV^@nq#IYLxegZ!o%+qQgs4R zng<0e8+UK1{*jhH;%k8aZ|2 zyfCWHs7Mg?ywEVapL13uib%v!Qfx`E7rf%26!?}|{wI|;-LeXm?r7gpOY0FWGhK4N zAlAe6L2s{%Ydn;}j5R);#*$+iyiU&v@KZgnl)GW4(aqL!r2HMEQjGne@lIq9vVlPjK!bNI;180ClgL`q4uymS$Ee~*9FY7fwRO#5M~GJj6B^CP zgzzQXb8}j62Ai_NO<6l?UG)o0OqFA2xH{ew+yE#6kb@HIexXD)*WZ$-8!R1e1sa_j zLJx^|2eFBvxr(!!Np>n!akW(;M=xu*z*Q~Q1OZQ%DJz=h=aw_$>i0wZxY!)XCH#pr z`wE_QgMC??UO#Qs_De;2!WY}l)|_;LRsd=hnD_Fe0;sN{@)3RGS(d@KvWyY2URa61 zP5KH>q+3go1>g}nHi4r`c1#nBC>GjrUFeifbxrS{&6`LTT*81H1g$*4hJNz;CQs|6dF zj6keI=`d+L_sYb>rbQ`SKG>BO4|@xLo?=&`<+~LZiS}is5~^BW-F4yon+b;>|uuRpW^6#gwJs1D1!f#dLx9T?5Vfmh=ZYXFs zrsekGIpN~$ETWU8WIJ4Sk8gXnN`X4ri-We&Ewc`63>90sIvrb$bI`V}xQdF^!#eIg zmq!gHp%wAo;CAziz1hG$=C+I#Ewv2Z+1eWn|CryYSPw{VXMO6kMxXSmHvZfjjtvNC zZ@hn{@L$<{q;q>4blF2YYGa)#Kx%B?Yx$dDw%K_uJ1%fnE}a1N!dfwbRl-?bW~>+q z2B>IvXucfvhCU|{n%OpQuSrtyl+wJ;w+1U?Zj$12W8pI+so|72vfz0TZIrqTX$Y`i zoUG%*dubUa3-Y2~2}~`LvX)K|ENm??jW~uAk={|xWbc)nBRG~4ax8o(32LUywuNLF z_@)p&qF*5>@WKt}cLMMBYlHbE2v~#YT*35CRKH9HYD0+p9M`xaU1#?qWGO}SeB1x> zWcnXy8Ei3t6FWqFv^aYHtd-c}ca zpF?KvaYYp6upVfqSbl)R4&`{j967FpHWDLbO%;z4At}SE4#~6KE=58yH8>uT!zl4U)o4pRt5SeYLnPjiVmG$fh6ar&=|jNc zFJBvAZ_RPwP=J?&-i(7|JNbVJ9JR-RBbcP^3J&aTiaDr+Wb*g_gSIz;lWR&F|GRE= zSMS~To?E?dRh4~TWD`Vgkl4j6mYK1RSQ3NS24hr*h!zozB&QM5i6xDMFiZzQn+bX( zh&^VAHnt#W1dUAmzt6c<)zuLh@B4fI^3k{Mxo1Dmd7kGy=UMFh_-16_w77YPesr3C zkeb$y&K~`c0S?LdBGHeopXf(D29@1NQp!Loq#uAT%YqjWA1xtElyzy4C(F{5omdv} zRF_x;xVL<70@&58j&aSDST7uIu(ikuc$mM9{7tRSvu1TxiBl-S@Wyzh$gEB?SDt2h zT6FU^yDp9!MbnE_AV=z2E%+$WM3ywsmOyo@c+wPq^X%76?HhWaVF&rGqXUV!31jO;#K|Q!OSj`<@@ZJ>+$y}Fq%^Y)2fS8h#`iz5I*l8rmw0Qs zBXTfDrlx|-@fn+?gOuC|m-(*zmTL{3zRxJ0;#LZs2cUdFurpiiCK8CtpBC|B*&!0q zNzb4R$3;Zp@@U9=M&!ccj8o=cJVtf%M6+|!v4qIG@n&Rre0tH9Eq5YlwqPv=*COm^ zgNUc=h#wH~$zh^NzQYYQ_L3j0tH`Vt$j1%HIb{Tm2>h^tV(2gHx3)u+HZV^c 
zB}WH2%si397%d1cG*ME8FY-3aX9R61Z}|5Lojv+2dEgL9lm0O=NH2j43U*PDo^T6= zp_^F$vVico($p4f`kB%ey_wCj8<;khE7Zn9_qXcdqGiP!C?GV4mFGjyep5~?^sgaT z=AM~07UTd)inGrI<|qJrpU{k_8VF9VcCO72>(eHvYD4=P^W?Vvt%Z5cse$_!p5tt? zwmqzW8E{^7P!cSsT26`Cwzn}5%)2!_weQcI9PG6Vj9E4AZO?ghgKum}7 zoq@!X7?JP%j>r6bXD<@h=Q|S#c3xQEOc}c!6cUt^GE>2g9hXcDZDmRRhztuXBeK+> zy(%9ev5Ar5-$4=GNMBXpoEEsQ^{WEsw*l+&kM%E#5In!~v7TGxEG=C2Dd&i&_roU= zqN8oT}LVB(m+e=zm#)Fzk+BAZ7rGS+P@<~Jd2 zPpAmDzH_8QbW9YU0!KSn%d_!l=fJ|n5);1pT~sD#L8ILg#^DDQnZawrj&e4{2_1CR;{?55RV7>UYo^Y&lxwYm?{p7LESJJ?m`3>EEjIIqQWCLV5Koz3F)8hvN3T zntY={xzS#Gm)>%MGgn%<;6%T*n@@C%Kbro(cf=n}>xsX2eihj39X}RF)X16SJ}#2s zB_lPMXyaiF7=V3|RuPauP(inQd;R4}jyTXSndTg7eVnJKO>@r6EJRyJZ<^*TB&x%? zCp$xiEKj-=VHZg>pbQ(R%`lgiumHXqA$TgoxDIT6?qp{~An@{{Q=OPKdztQis=TGyuMPIp!j_hOG3&K{-f37uhO1VEg|KZTUcBEImv8P4c#RkDA~a0W5L zuVygNrMm2NXDIUJ$)`KLbN($sF!G6KarWuXSgXsCFwkh{obD8`?be^}46yObA2L!A z?!!#gS$}YbR+Eb0fFrnEeBzxRIRl|HjG5X@eeEBdh(7ZVPC0cn{K07<@XUxaoCdJO z!)MU*>-CH?ovP6Lw~EXD)_G?-L`ztt_s((43Pr&T+|OU+MY{7WrIFBo4I!Eu5^g#-BK-46gKG&BOB6*T&6nA@ux?s=eTSP)W`DEl@J`bsT zo$dUt9J>j~CSS6>H@@ZWlC8NhU|(6J_1Vs(iaT)`fg(OxM4Vv?LxDpWvVL*4Gs>D< zr2C)aoD#T7H=g74mDknh(A7Kj+vhlC+>m^Gjw2T(gU@yP7R)V5E_72+zxA$LqR%3V7!E z&KZGwThBS)Q30Bs^GD}i;$n=sz&Vz;k1uk@F_F(*;Os^lNBxNzoTab)6P%z0sQz~=q%JzE~eKT^q((wPOp^7<9L*1q>%nWce7c+M|JSe5ZYUH z|35o#5;;3`3ElWs?|q4L7?(8HT;f!fr?wURjL+OBO8)H-sT zJ>t#*Ov{G2b3`TPuQa<&+|?T9Bs3k<9U_sT3^}nL2!BDJS#Ln4x!&0mA4PB0Lnp7+ zwX;}_7xm$@pm-5-5A!xJpZ!Treyr}0~3Jk6z+=C--%_=T~ z^^P+!s64$rjD&w`+r-mW$K{mtzGxL96;WSQc zO=Xk&cLd@9`s(g9(RY*NtRBgMRMu8~e}nUDx}S5UGpOwTT;jTMcUlz82xKHmS|F(d zuXJu4ur5Qw(#S;_C^ggGZZ6Q|vYmT0@qZ-80dN8rpoMdt7Y#L->*MLrRnBFli>@I4 zCgzU@OQP>1xB^_h(aUAFgs5^S-rwrONt%#C+RZ zTtgD)*I@Fa814m$P85V^5S0?idRULVqA4w;B_ay+hWUzGLJv%0e{nuz z;{K8-svwCNzC2+99xf7P>kAhCIVTo4YhiOvRiB@VL?``|!u8^qN!M;GDbt6{cXCjT zK4QL88+!b1!ja6@7tM!Vx>LU}AAoPtMc2SI&eCJ9aU@{QS=X@n?$CE$<2;vjhe#sy zp^KcO$=%snBe~CPs}Te=@miBR@&G$%u|Dhp=Q$o1T<83%kn2nn)s1Bb^SyqZGuBT6 
z+9l~WU30zTu~m<|o_%(|Zob~Zr-6R=dS`$3&yWRBgV}oW0_Oyp^onXL^>JsZ80{{d zRMDSyix!%8-!6isJa(Z|X)ROwt^L*W5UK+gImhg>pSPQS7T(UNLW0F%evi!4OL5!NNc#H~etMjUe+3q{|-Q!ZOQgj;G}X&Jhg&;z7RI z?|-W2-RO+AW__mrej~ef?q~X&8=cpQi1Wft&adJPD7q&M;ABm``tOuk6r4NG;+?P^{D+sZKKlj3%>Zob)>!lG=x8Du$2=ilOt&z=QM92S_U zom=oiar`Y#9jjY^i&J6MD}B=~PM_gv!L8UI5>zV{mJ1{kO4*)>dmk)0mF~O+m~GaP zM`|XO8C3u|AY>XtG+e`F2(8u^K2p;!=gvHXje6<(mN;c{mHxydHKl>(*0+~97ZZm~ zAG;Lh_4-%!drO_sWA1sC^Gs0^^@x9wklfVS5BNH05c-fP95o{Ob7(R9RbIZJm3Y^FqLS_DX%s z9e`2Sm95v_;hYr!j_&Ih9;}}qTpQ9WS2_8C%k;z+rz-oh9Q$wh7t#M(1qGU|XOYI^ z<`$U1%Ly6I+vWP7El!`noJDs+s^;jycRJZ?@3;|1!`2{NK;>8#2##L_1Lcle4PqM{t<9XN5CK*fu2J65r4Ks;2mO-Q29Pgf;}=j8v^R+@Sy1Ao`X;sPb9wL3GaiPBb?ECK zcA|wpbh2@Wd(ZN)=j$gP=ID2)KH&vJ?4Eec@XxcJhN=CJKH(qElRVC9HGHF5?IO!JewJ@`k>zVY%h$We@|BSx)yi!5LIS-$+~EUhh%!1nKSZCa0e^rt@Q(#M?_^^-3;;{$6Ky#!*IqbIz?YOd9% zyaY3Hg(h*2%*Gu16aC6d&IERR_+@8E;L+AeFFSUC*I&Lu)sN~6UUAOl?UPqH7d@)O zuR4DLo3y_QZ~urcdkysSh#vczQ!{*Y%1|c@V;iw80vtJa^S_bP1Vnbk2NCcDcjfuww^|~x6vt$BxwfUB_~Fdr&1NiA8!N4TQ5r=Kdz>I)ZN)Q90-W({SKnD zXVaXtz}_?rbiyZ)VXkvTSwqecwtoI~r#xP6iP|6XikyVfM4_;)IAIxM_2xn5h>s+M z2V*PQZ#fAynrtz}7S`P_4A0J%kL6L(vRpKcHaJC?vmwshMjrv#-v^nGMFQ~4+YQrYgz zZX$5mNwG9NZeP7|i0`kPi6Dv{VJ3s8(gu&SA_dFoshD163RBL-zqDm11Hclsfp^Qh zT+R*HPSM7K++T=_;Keb#7YG_P_bbSvvlG4@u~|319nBP)^G23&?a1iRM38#gKv1$k zaNHthr?Ry~cV%2{@CEIrC4<5kI5~`%T_R>A{V6fz1csA^G3Q|*%)Xon9~MBfQhqYpvKK)JS_h^xB zc+;60y5~uRKeKh;P0p}tE)tR&-s1T=B2pLuT6n-Udgdkw|E2ozP0lT7Ump9GvsZqH zNNC{HGw?rr$6K7s*XYU)=dd1bDD~_P$B7e`!*qsF5ORD%p~QHeyrsm-^A+JXupPyF z8Evs41z2+|fiT~VnIt8A>BsUt1?`IvdxvA3#Dgovzhp{K$#csJ=0t91VJw|G#O17cr=R*C&rE(D!@Rha{6>uEmuMxUYEq+GS@>p)dKu=AI zG%Po!3F2qOm;~Vf1JI(~lub9r&?bewkUlp!;R<%qV{lfmg^Nqdv{=S5#A@7}&^tVT ziG@!aw^dbQ|8H=F@0}{?13;n{wgcKxcq6Y}uC>GufGj_bp{$sjbSxt9fn&=?sjta& zMwH~;q+=+YX|Z!D(`2F2&a#6iS4kfzvqRT*?AhkG;b^LoM-f?N!DN0fld4^^ESNI- zo#mT!^zwgGE~VN<4leaAE^wvB*W@dL<`#&*u_mZBY zIszyeF(as=K`_nAg>=UrPWi}Y>J@mG%ZO}yxX5ZSTbph_Wr-dTSE}KC&aw&|Dfh@UEHoJx>tI2JHBrD;YCo!KD 
zJKr==OvK>qDLHc`yzu~Q{|CEdM^FrzO#-!}_SI>QGJLqhrG^1K+Za=_UwZD41hnsJ zhma+;Lw3GFKvS!wBA%(OJAV;&<{|qJzwu>=Vg%FO#l#Mg`xoQgCd+uJkk#w4Sr$nh zcG@O0o5g?zV3nm`yQreJmzmPk8o~>v?+DZPF5^W)Pd&+Tf1hR0t~tcyap!Qo*?G8v zHIzY6_AOXqu@6F#`~!e9--Z&P!0aGPloLsxY>;U}!I#S(+_@O7-Ymhx^ibdX9%6$8 zbo>NL<@m&=GYITQXjKH#Y3`eVP=@rsMXSda$=Z1DUeES2@q~ydJTsP66)X1eSs3Am zEz(<|e85hu(EbzaNSV|+;_AuUeqFM3K~>E0aGFbO9XuUT@Ui!u@=!a|?6vDR-gj!_ z{}1SQr@&=Et2mP0OZ$wZTslEK(z+XewTZjnl*cDLA7h%-J;3gwf>>m?T#@hu<)b_> zSsKcFPwEPH!ymx1V;KXHPhl(x7h`zMY_(p9a-9Xj>efKnlYU zHKRC6%iNU7DJRcf$sZ7)Wv_7(4lslR70IZN3TZFESA)VJ{5^nJUd3}JLxE{TSh2h% zU=8<~s*dUzu|k+CUHUQ7fd}=WA3M7ZY^ju+vy}D_`rKIOjtw?f`mDlUn9Xd)4x!L% z)^~sGjEi=b;+M71>nvp^Hx%=017X`o8|*{aHFS0K@x4@;yQ)gcv_aG+2+b%QMwE-8 zTb%yWS5-x&PJp3HYT65Z&7IY_bvxpT(PEs9AM4Ii2nNrtQaA!L)rDSHxEr%G)$}Nw zq?0HEq30adP9?EMjxTYtwS9!0so&ay@Mw+x&lcwblr+xd+^pYWMe>h5f?&wt{K z$w_ui7k=tgBGZ?lmG5L|Rl+3c=0AnY|3K3}bBNbp2m0&VJw#l+nIW?B!VLVpA+E7U z=)%u9JXh)=pK*)0ixJKJ%o)}_q9+)5Vnn4o9g%CCYW@$#v+`QTW8$*^beiRgAD7z&z%w0r(fyKpF39v%JunQI9o}b@g<8{sjvLfi4#vKcPooorFY-T zA^TB%-d4c7O0V3CuFo3%@2v}e#dB#DneP9}xhPaAmc^BNu^I`|%k^_%BDAmm~d` zqs+@u^1`K{2ro?OsYXaJ)lWv>XK;>5Wdb3NWOJU5N zy?TC9UVhHwr(e{(F5uO)Y2G4ZtT#%?)IcijlOHT(ZIR{_Y;2=&P1YPxBLep^XGFR9R5#m_W#tU_-M z3G=%3vxsFoK4YycJQmT7@`P!fCr{eA0Os8^hvFpCjy94-Hnf;DFR9BFY>~IwajFzd zZapBch#@x0v%WOdk*&1bl-91C47wx_;-x34*lSGIuticANJ($7_?sy7HkJy0YGNfy z0a$xIwi6ICpnh{{%$B<#TAW?B!$2EJNQ*oMo7C6?G)FTa_}wbBZ`Y4YEL&E|c4#vA6jN8q1{?=SNl;5C#@kM^a&w$ka* zelk7Y0|J(2+Q0^F;g~B}VXeO5d#+nm$fn*yj2m+K?gtE4J%{FIHJeDn1M!YYOR`J9rJFNkcNK-k* zn@#aVFI#v?vJXvI$RUiQ+=@gwTqKl4DAV6sQ(~gj$P6d~qvFlg(hTm$u)}}X!ixxU zK>JQ%+0C92W@xo!X(J0|_)Bm)Fe}kHs{OeJT&kma(yca9{Cec?W^prPZ21jK)graD zP@X328sG~2|E{H9+s>FEEox}vP6{Nt9&Fdq6a1JPtmQC+yQs4apl(I!{ z`T?cJRr>oMoI}c`6+7a9@1^4v)9t?jMg6T63y)V`ZcDYuE!brX?^1o7mTHb!wk)j_ zVk0c`HHuIXn!gHa)m$4c0D$r_K)p3IW{1Q-4@H4oO@0w z1FCZO&WdO+_D!)c%!LtDdg@^yg_>+*6(ZcXb;%Pn-_||PU3B{Ity=@?pnzRpndB;& zWP~mHu%N1qH^3tsP$H+l;~FYi4_10e#@tGdxze#lGWp;$-u^BW*)8IT=O3x1u`;&T 
z1fV*QKk4}lxZeRH;wTgo-VM?_&>9RVMX5mXY_2qdZB~7Yw2~jGOV!0fi=T{q>c-0s z_7Tj$>{6YdsfNT~7R=U}>csAXC`f{Prif7SE>nLgEDT#%(2RVn2gx>9$w00#f3N1R z*8=k{J#4N@j<R+tk5eNpV2`rLcbo6 zC;Mxa)Y?U@GCsIX5!^GrZKhVA^Y|qXP-~dSmDV1@k4X2rFl`|AnP3?fvq7Mg#hfiq z7PDTSEM{YMq+A$;dcex?Rn+0bM26ce#?nt-!ZIO?tw>)s z%O=6D?WNq;$PY+{y-qNt@HqPw8f!ELl7^VT%VH=|I4mC6X7H*pmibxLicyQ9whaQ4 z9Li_QQw9wZtLL}ZXkcA%5n}`Pu!msQ76=}qcRdkct^qcTF)Q?5A=SI)r$!}hZq}EC z)B%~p;ez$Lkm`4UobW1a&A|!M6HY{tke-BN+4#_AV;WSrHO!_XAv-vcc`m_SM5Ic> z8)b+Dz9Ld$uhn~Gt4aM$b$oJwQ=rZDpj+q0O6(h1#~~S8!s4KY+BfT^IcnVGn?$;e zlMpCQRD#jvQ&q9O))8GTe?!5D{-`eOF%oqFsmZ$Zo7t*#I7Q8l{Hbo-k}7IYh~EwU z+Z zz?1s7d1{g}q_jR?wvk1tAi3_%Q~MG%W^116ixc#!d^G}9;Y0FOCATQ2=BvMR%|Ez6 zof_Dv7Z#|#!=H8{^bVic`Jm7Og#e2qzkekZsrC9yIooHmHkdXr{M8>9sB`GWafPaE zjM&6v6kx|CISVl`Vo&2eDG@(ifM`WQx-kd}TEtV=D+*QroWo`EMl$#}3f0iSCY@iT z2L6gYyc)bNd)W-F-JD#YQOzshcVbS0XrXv)7iIuFvM1Tw+YDvc>frN` zjGcN`k?Iw1P}8Ba*3r55XD9gQLhlVWE&r9u$qt$s%lizW96(*s(B{ar@`{I zo(l7Xukg0fxpw+sc5kdgVAQZYf(%zvM*(%{Ud75CV8*n8f;;I!yAmeXAH+s|T(PPw zZZ}N`FqkHEL$T`ByWW+DKRRF=q)G~I}P7181z(%IW z+bAFHC33ZDt{(-J4HDR_)O*_MFmE%N1od^ulT~?F3UyM5o#l1VmX9m8y0QdxfCJgS zc%gG)f%o-if4!}Gp+w(lt8jc{Zt@(*+DE)SWIr>LUnYVR-;bK(am@+e>bU zSd6`vM~nQ;)<9+$0J06TUoD6Xl13WB_9y-bWCj|^kp_Va!Uj5?$%q!|a~ySj#S6w0 z85W0-2&CQ8GL~50q^y-^bsO9-Sv9@vGl);P_704_v;Dz_|XVgFJwI{ zlOrTYj}eY5QzJr6;uGO=eP)^Jb>waQHivWi;XS~;;R+H!GQRX;0SR5{ITn*pe@j}9 zW)hO8U2}{@u#Hqfe^{n|kL2r+a`hK$_9|^vs8QCuReD^7+A~nC&#O>}g|5zqu4L=u z`>7Mh$(l&rm1=^871`cZs=VyRT(+4oP#_iI|F}sdHf)!kRimb)cSqM$t07>d?(_oF zP5oW9DhyQUoEmjO;xn}Jxe@C3x}z`QQ%BUQgL@S7N2iC^s!TniPK_0Z*3;`$l><-3 zAq}KLFK)`3ZvR6ss#A5{b#AXyi`^>GmYgc_y)vS9gDW9nV_Ip44VxTL z(C%6kO^(Nsejlsvj;g&Al^4mh8*xHChP@g3g&K8?zBvZpu#5dIBIsV-h%8OsUY>HD z3K)aA{x8MS&&>i+f|`<&cf)Cdoc@Pm>EQ*bxj}%Sr}kD;tgEwS;K%B3dIOL1^{75- zO84YHK0uwU2liEYJ(A2CJF!)42czC9G0TJp_6^pPv!4Vf;DTyQnN0n+e$d(~y{VrX z4jWV4UmY8hW0qXx2kmE|WJHABRW+yR2?Nx)9=(|pctTGesD5993(5xs)8%otk)0#9 z#+CZ9fvTd~%v)Esct)P;ADUo&>e$SNslZy4iMfO zc7S^f|1MPKC@k?$g|iTeLlUmFH;OTK>SIT#%Hw_N%Z(C@MdH?|DrWsmY;B0H_wXHn 
zFPF+@?zuQhQX6=FuCO+aV!yZOu2Cu)_|b+f!${Yl?%OJT9Ru^yTq!yRZYDczv;L&o ztqViUyYHG3S2a3P0`(3kdqyazWd9PJye4~BLLc2d#X!-kk9sn-%6D=8*BYuDlf`HCTNv$!kRuE zAPbqM=%e{1y5p^Iwst3|NLm47GoD_6DV75FO;95Sr>x8ktR$M)MTyaDn-3uo(?KXsq@ZhUL9Dz7zmo<{y< z?4#PRPVp!iUfb2WWQrP-d!-35kLI90e6s2t-fRx%#y6aB_@a0Enw~dB4b`oaRp0+n z&Hpx4rD}fX$2Gq$HP@r?{4)%j0{7@m`>PEj??D8Rv{qUcv;w|0=qc1*Gy{V5r zME&CD1jaA07;UZ5~~tjeb*iF3kii}*N``$rTR{%^aJ zw?ltMru9d9^WXb(oxW`<$I`#HuAi#T3Ro-ZTPOSib|G}D5TH`se1tl*^_gF)MFH!b z7h6yKHJ_|^Zf{+D1k|n~3&H*z(H`cqCM3!~jFlGQlb%0P&BP+yJ4#IswCaYV)Iby` z{&Ezgq*cFu6eNAME;IiGe06%{hPEJiE-Gkj#a~R zJFv!!7I+=a`s!oVwSk4Y?l@J8it&WwR2y$U90vuj*LBA;>Uw?T@oJ)!yy19ts60PB z9?r5}_dP-Vk;g43sBIX*+;pNE%=T>k$BAlTzOaFW^|1o!4i)O9I- z29!kGSvJk&)*Gg&GkKkOvf9pq?0t$lCn#oZ51yia9THn~wxxZl8e5C_el1QCjaC&; zuN{|G;>Ep<0|q^scB-07S2|CnV{hvrr>SAQ9dnv_yYVzN7$cvjPg7qJW%|+S>LENj zyreHUo$kH&l3pRt?_X-|I9>fEVBK@GUU-K3$ZG#a&pJ~L81>%sm)h8_;rp9cmr@vvc(b2G(0dw2}1 ziE$L;!8c>DAp1`?KHKAwFOeSfwx35QN}QL<;a%Vqty;?N4=iSG>qSYHG_fC$Pf*A$!SX~QI@4<)BMQ-xI33>{h~*`1&* zi00&y&unl%Y7 zY-UZC-Xcv(t{PeHwx>SWYa=OS8B$<=G-@ZTL0zgMlzE>fj@UxMp$>*RAWucvkxw zu&BDZ2V}|RIWw`7em-XUNz`<-bxaPpkOWLI{c?YYgg9hBCKu9~6OCTE%y;!J<~yMO zQ{?tKN_;yS_}=LN`UKGsKWj4MMsYb)>~BN% z-v|#r)3XdYWcp;rNPRanDL4mqkXw#KW~@140!*gRM*?<*(5LmXa)Mxz(uB1UP`jgs&LNETw6bf9d%25sIyab`k zmHMEINqkgabg}B~xAAJK`qtb3tTF?<-gSu@TK=(v5-M6ZzcW70riyoBjP~qQMbWrA z!uawGkF0oQhq7YDWy?5zeb5qhhMqBp zlj&N$a1H|0Ir`N(0ODG`Z4PYQ6?)VaDyE;k(y%y@x$s#{`k1+Z`YgR+cYIhZo(q7^ z*ALEB>k8*1@G%C3Ebt0_+6_q7X6p?%s96r=ZwJ?OXWXcIkG@<*4R(e8aURObw_#`5 z!!_R4r?3ORwj`Ml@iso~C-e&hbYMlmbIdd*dA%})LCRY(ukwbsm2M+K~p zuF`+JT3u!>%F~apVXoAC^;=v;oiSgHVd-w4k6(~Sb^Clo3$yfR@>rouuTe*1#(UN^ zO8oyke2tmtcdk)~iW?x@4G>=?h_XVi&{pFHC=2VtgBsN|D!i`|R(`pDp;47M;-R^ZtezA4vh$?^vfvS;V%j-x6|^u~pVJ!k7r7ji1UUmv`P30bGl zS)_hRgHJC47!#J6N-IS-`)Cy{T=F@KQSHs*fvNm2HM6%xA9r2(Nd1-uJ71_f()V>R8UZ@Nj{W;NWWy~XO8?C-eh zht<{(E>Yrdrl8q?{{hV$GiK@Yn>kEgsh2jZr+J*VR83`TJiL@M?4A1GOHIzYWwbg= 
zPhF;V4>YyTU8eRCHyF5685;Lia{-mm+YLgsyjAJ%Ey)-0OVXTtF>!89XPj9+2m4^bGlP^-bBl*(pjiqTx&we+izc(abq;P%mrP~?H z>SR(cp|CRQWP-f6Bwx&^m-yaTuIy_SBxFX}`iSYZXT?tyk`?j3%J4p<)R2r=6=h)) z?6oT4zoAfKfJw@?!DvQ7nV7nXQ^+yaS)`i_3uEw!nWSme~4w3rkaid0Io zNr9t4z$fA=Y9zuX`oj=8y0A!0@Pgj#fQaPxD?~$OfOWCH<_=XVMv|+^Bhl#A%d;50 zS9xOSb^+QS@{=r$5?>3)x!(7GBld03J~@3Pu%~>5CAV>?lVWPxJAYJ$<)C%HTyom$IVCkoGcxcmdlb2JPr+W#))YwAbU(8PSofIjN>c|~Z!Yo?c=QAV&gIgt? z5eNJ|J0oBHby_VM@W}67yVe{mPiC{=o;~@w&2$qV$>eGy*WPwVst+!?=tH~I(4J}{ zP6cR5`Q!uvXFpV`A-wbhX0#AZYe7EKLWg|oNEN05L?@Opa+S%m-^aCB z66}ISfS+j%AO7CjC-ua;)S$YRImwB`?MxzTr_|a>rtja@bMI1B|eWW))dRL*?|N8DsJ^)fZ}+%64H@3>3=qL&4H6bT4U|C9L}5KnL!MfH{( zJY*Y)A8SAjOSBDo&%4zxrZ$`GX#nwQ7k))^y8)uwo4X$Eh=iRU;6~X~l0&SPw$fnZ zyDq)yZdEm+MS9voPv_xb$1CfutxBBhC;PWba^YXwYk5@`0X9lB^GecCJun@LzAE{-d@D|f|Hf3i=VlB<(k58a`EL$UlW zNs!P0O^@n&5}bNjb)>eIy$ta7>a+BUm8#~L29jk?Q#*8c01IZMagOaF6O6`?4UpjK$(9KH2j|=~rXcbZ>c=zVjY6pr%O@ znoI&cDX?YVibG^Xf6+|+**&W20MW|uhq$05E-wJ9P|t4)`sKnN`>eG80`?jRWYh2zygdf)Zh*I zK|-po+b&~_X*83zJc+je$kB&Rak?+NPGko>FolXS*nKte9;S`<_oIv0C|~h8>NSW_a~jy1kO+C+o?NM=s=}fK$Re}8o9&$r zHlmei)}&doNC1YO*xZDlFsO}?+f1@dtH{0d1%Iq98rC9(T6QwBw7Fj;c~+6Cta6kW5g$uoU&iYrn8rl+4jfXV$t1iJp2ujV!6h$c5cN9^?>PpY}OF-Em=UNpV-e z3z||0Z;WzcUQ=!IRi z>`&p;n!7nt#4UU=#hQB*L!d-AFfR?na?Va|1hHjmkMtaA(kzAY9*hY=XeUgWwo7~6%yJyoSMXDQ_ePOp zM93Bf99%7bjE`GE!|ik;#hges>Pbz`ti-X(n;l&h|KXlS1XmdcA8*$}&ri*AmT;9%9Nt(6bBV=P+v9BjakYek3$LV_qO) zF67Onmt4ei;1`=BGd()+EJCKwb=k8BXg<`xdRE&C;wdb)ASn-iA-hf>HANu(XY8g_bvtK~F=NbL{3sii&e(weKLo)cl51+dw zAv~lK_gla1=7e>{OR6Z^F3!4)=OH5yH~v=;m@X{{6pKW>GpJ{N=?>5{TAh4-@k`1r zEC4Y;drYMCV!ik!dj#0_ zbe}o}WAYQ+mWVrnRcKlOBuoEk6Xu7@xD6qwU(ZkGqNG#kAuc@WLFU;8)T3A-0N@x> za@z#^21yY1E#!B$z<=;jVBGX?5n$u@?>hNq`iI;CmfHYxmlcVl_MCxFL_ZK;qRTXy z=g7_Hs3u(66TXdOvD_0&LkJ!0XG}Dt6+J2doLTdhU=$AD z7Y5+sC;=o71k*#3N=}gma5mjW7<(zUDro!O=hu)^>}CL5al1&ba)pP^#Vs5UF)`sm z1PXJVCafP-Au=%Vlxe>!TMCc6Z9zd!VK4)+5BOS)fuJ9mXM;Xrqbl2HeUK_?w;kq# z$<8)#iC}B{vyfEBXAM&~?isMLDuEx2s^9D=%Km&$>F`;uHuaw%I 
z@v0eQ63iE0DFYa*PG3qHwsf&vj!Y4L^XN|%sC4*Yr|gGt6=c?lyG~JL3e1SdC^RHv z9Hv|uQ>G^-YyVit>}?ail3Jx*Y=@|NUpdwEw1(1!6%a%be9K&RP#3(R2Jf31x`}m&0+Im&Y`#rsQKP$1;IAjMY`idrB6$LgxZz5!i;lm6 ztocXlfaEy2K#6rQh^L+8VkRKppm!tTEoKgL?SHXv2r-rzRjOlV(`Tb)7s2Wg;1FjS zpa_9G4psi9xAHUe!B06ky6#O?IFbti0Et3EusK+4q7yu_-*%2`Ee89lGZ@sSt>2FPfv>B-Q z>`jHoR!j#8^K4e62ke;zfLd~dl>>H`_eM8adbgV_F&NlVLzZ3^1Or+6RGx+`eI&mO z+TSd{IuiqijY=T+t?|T&Q`2jjm4V5CI+1-tC8$l%m)DzLLL7XeHQSGR+Pzy+Mf+Rz za$-vWc~bKydnNWrfH|T}2%32NB)n}rM<`lhBoq}(WWXE{cOvR=Ks-;NO85;*2i+@*ljD4B>GGlL+UoDIP5|J1;bV;1SW<|y&(T$0YiyDv_cME71 ziM3CH1XPe*Ct~J7eN0JR{}uv`)xoHN0V+?Z0XTLGu)wu_GbwO!6h#Z%#}bqm@+c!o)@0C_ zA*nw7qVW_R+$aPnnb(JCXcQ2gj-s{%r~6Z#6wWCiRYB|i(lo)#B$np3pDutHJMJ?n^uJjsdva~^ z($-`}W{eP*J=P>Q!MgxkX0YjNvMMuuO$^73+}rRE;3QeqB>S+MoTvjN=2KKs1Rcv< zo(fjfPrPio5@et)Q<#e@dcp7+kJ!zv)Beqk^k@XtSR%Ir;ll!(Gh^9?uppNPw?pYa z%C!GsK5b2W0wH8Zi}mgAaPTYE&P#dKaU`22c9i`O&_sW0URJ`v0KbWwn^WcU0tbs;~2$6v*Y zMVJE_1i+I(3y2vubz7##HUb4@#X?n}6Mk;ZOu#Y;FqY`QqB9H_;tmk|QN8X16qJhf zajjOxWN6f`p`?tB0PF|Pg-7vWUn*?Oieh+b3&aE1#d^kORdY&_z<>eO6l6mm`dbIb z3Ix2lVifR(n*koiSO?=gmx{&gr@)*B|!GQjFa75rmlPEDb>PjD-)FPuh;iKf}j_w|qX~GQbzio<&Du}6BC|DM=P}m&G zLIKDI6a@*fYQnO_feE=FE)e!MdnN-ExGzHb6zj7*mghr$VwCu5GNY_DfmtKKgsdSP z50~cWax3H+-IhY#^0BI|N-qP7^z)j=a`oPC221qUALHhsNLOxA)#2_6AY^@vlkWn3 z;1(6jD&TNfq-SnXr6KCbkJ$R^Eo%SX+%~ccP&s2!q*pLL1A_ZYh;@?vb??;QZc)2c z80^g~NFa1HmA47>@j5R{@ArwCk}n6Tq;q@yiMjzX%7{-<{QHMK;ZwXW++ipkNHxXC zuI=U~)~C1$k*}ZwU>Mmo!Z^TGyedz_RJcOl4NB+ZK6jH#J9v8)u0SLf z2I^pknleFN;-e%fXAurX4olB7s$l^*leC})a_=Qvh-n7S#IQ0ldYGW3Kn6~vyCo^m z%P=5G_Od0Dx_vpz1Jv!yS@QMv&(r{dVoTHMjEb-y+~iP;uo#gVmeLSCDz$9{#S^xyQ+U#SJzxww+!$fdVR+9SI3YZZ-QQ($6H17;k;MM5HHf|zJh zC$k^@wHn}JBaz6rf_#M>H1hPquhn5SZT?9pAFaa8QNvOlzVjP3yk?DQC@Ct~vGCX5s1Y^zHDteRO-K`V%(8En8WT3b zwYg-Zc|-!NcD6l7&+AfErB8|I+V2%oH#2(a$fI3qSbsTWv;!}h_R}Iv2Z@@<7eCG& z{#Nx0-da#;oX`i^|GMKlRqCQKVk})sDdFSf&HCbR)tEzCKmw$-WI-a(Y5?1Dryntv zE(IV38fwXkmH8GzN67|?lw#^68BxdJZzF%p#6pO1H|rn1RgvjUS-71AwQ{*M~T4o%#AZ0k;a 
z^gpVDoqWr7btv@qlkKV^z94gY@{qn5O`;@G=aR)gRm;bD0>vUGk*6VA%jB0KTAW^i zHZZ7kr~!)Mhs);dDtaL(sV7gY1}dOUXlOAULA_q@NES|6DdUTV*8Hf9p$~#J&4r7| z0SOpDE~bLtvVpkGejE?wNKSgt4n$FYF%eBOLqeZkN<$h65|f$FJt7r}3jOymh*U|( z1Y>PPVC9Qd*Y8-dPjW180h%mQd3$Dp^g(jcxCtZX6Vl?7gm)9Nz>S8ua60$sJhC+u}Lyk5^w=%Whz%*<_ z;Tm6{-4i!TI<<%l!_Z+vJYGuLck7~n+nejzJCCpj>0<-#ae=4xV*z(SXe;n!m#qa2)LASIvX(}S9g839yRph8fnWE*L)>ndVmX!HdCS{&@ zM<17!Rz~1{$1=~pqp#0$Cl|Lg`dZs2bo;8t*3DV&5tbi~`tLc<89covV1l&k+FUn^ zCi}!(_hE~Ox7%{vYI#=XxsOQX+t>136Ayh`o_j^l2T7y3OLe{dMX)Hv?VSU)`CxDorie;NydH+W#qk$;{3*kO6fgz!f_PIj4` zO5}hSjnGK;_zc2EMk0pdd%ebI9Phs!#kbz$GtTngPEFkwIe1rdr$h!s%6u@YPcSBC z_c0?T0;PS0?oe*^&;gN9wA$QZ=10q^0&z`Aq)e`{k>23_R5aMY79SW!X@DN+x_tuW zdaCOV0N5_#ZO?N1a^P`D#tNUgYfCtsqY%FaBwBl#EZoI~WBVDWbD5F-mPgAAFy9z! zm*`H{-MhHJ^8+%N46!^tA?(h~FW@HKPtMa1hutw1_dNu~GBIe_#v4!Z*~^YoqP=x) zi94~fx0w8}8YU*A*FgtBT+;V){reL42zvf-iF z`;byMQcxc0jSzM?N(GS;eRQc?eH8pwbtDgf8+7~ zT)6t8>;@c~UZ?FcchGhveNve_hT-2-=8nvByx?>s ziTP1Szg?GGsy``ni%In5a{Jz3if0SQh*+^tOqpl@ve=Ju(%ZLHF@cx1SGG@m5kj52J0j+Wk zS$BSP7?0}nql0*$^irQW)2lr{IszfH5rE`J`_Xu_f$ae%Fug2dGnYgyg1cJyP-a)X zAB+y>4c+9rT8*l8qXW>}oi36MlPWGb zneS|gL|h=Uq5k?3va|a{g;mf{m44#auDkETBE%f<5E3c?=`>&zLkKMr z9$7(AZxuJNBH$2_ZlW)8utw{3d`6SdiUJ*|bIZoc7$7v}?uVb7`6;Ll9SwUC;t))O*K0S~hj(vx|ChA7BMOS_3knDMUFly=GKkz5lQ0A3q%Fa_150iWcvzTokl={tJCGO#{~E0 zBgw3XZ zINIyw_NwSn3$yCiv*^19{pVipxKsc7oiRj7SkN-hXrzT9hCDmMQFANP^uElnC<0W= zp9r&@1FYG%!oG3nIyvUZsTozoNV$*;i|IF~+aJM3tz$xnR!AP|Urb4TaBp`gUHwyU zXll8>y|+8uF0*e0dLm`e0NFJ8`n}%n1>lKe`?$Zc=H8_r?&FRdeVtIT;%+|3@3#@W z&~M{RqB28FmZs}H_M<0efWuPxOarF zoYfO(VV-65Po52#nFEr>h`MzIVZ3r2-?TkMk&`IfP8)P zuc4n{Tq8iA$yG!GDjSer0p0%n;0LELzH9N`lTf}EU6K<^|b z5inOK_Du$H&E{4QMfzcGS(0uoALb4nSmJYtJOKe54%0<_?QnN=t}HdjFZ(9_{BU-!DY_pkB`b$E-I~Q?x4Hx^|NO#Dn)FKKJ4haoTE*88@ ziagevnShUz&gJW2ySXL(c37RRR0rpAh1N}0{0`3D%^gtKV|{enZf;o-%?pzS0i;Bp zes4FoZd3_CRwP%l0udIpSsE`f_G2`T0y@Gfk!FXc$HbUhcz`@TZj>91%S0jB<`jQ( zk=Y9%z>aA#L!kOwZ%m6qd11|`iBg>>%%_2iJYJG8CZ#P(3+k*t*)J(&woQu>4*+I2 
zD^j&`EQbY2?3xypvUtPhFJ}w9Ke|rdEv9gX8!OOXj&g_g6XLNN91UZMR7{X{F}FJD zx?2%CyhQIm+8xY_pEue)GaKazj^%m!1ku!5fv*NK)=HhhX{@$hKO?%$WbKx|?sLn%^7wwrU* zBgVSFcDXkPIFj;VAF2iT@{Dq0>fL8H?K~7AO^BHUn%AF?b<3;E7zk5sFEaya*$JTR zJI*ajWxLfM?@qEU8t3*qV?LKY(cOS+)?$4i)>keH_}LzaO4z%;UXY7i0|tFP3oC*^ z2J})9MGKB0n_Gh0iv06THn(I^;$BC>qO1+d2}kymhs5ZPi2^qtLy@pPYrI=N(42g6 zsX#SE)s{CAyTcan@M}jhBH9tR=|Al5o_b)S8M&_gsdjSwCUU5f1|^qi-S32H4>fMu zucQ6ld-mQ?cE9+~>3%eBdJT(Ydd)ECwP2pU+;H|i#j88#T+cc`R(nRY2G9P18Iis- zqfv0TaLgfgWhl~D?m4q2)nD4s)Yd%fvTWi-oy3_lDl=Mp7$6>s)J&H<$j}Ucjx>+^ zq$-KM5C*%1O>4hRHp8SO5qWe=Iy-=3qOJ>XPGP_@bJ9L4ChSLKiddx?U&h+SJ`g+8HqqMH= z&}ko9Bur~sH+R)!T2;wWn#DefOVy+OR?E zF!a9$dw4;Lks3IFNZ*5a{*ZBynEtTAjjlDDJP-8cZ}UiKWo$p_L~?fv+LPT~urfAz zQe@6-+|8W?L7Yr?WuNC%&5*1)J|ZYES+o7Du_&xsnr??+KJqBT9Nw zQK4Uv|5-F@Q_0a>5W501hfj}y#S6XgjF<$i@J(?_x>0(hU?mRksu~=91M+- zRC64ZQUzkb6DgK=L4uG5KZ#<+)SRj&rk^LevN1IPF1I(iV21xa2d?e_UXg2#5=0$R5 z#00yB7(JO71PV%#?C!xyaT-bV)Ql>9lz#sUB)xl-98_hw^H%yxTQ4Fz1fFvulj&F2 zNs+N~$n63(^Qafz$#f^t3)2Z@`%{$cr{6)H>$$aqxhkgDWJ``0Ms6NWi0G?9ni9s& zJmFz}>ABH8ljNDzMRCD2;8&c(RW)lHGLu=tSHzeC5oUyJ9tDTG%5%g0k~R1p<#_1# zx=|h_ASilk!CC5$JvS1sK{N>2i}Hv?Bee%Wut_>wk|MP;q8zS_9(s}zLht$>MjjLa zR{8`m!KxX>N8m7nqP61`op+wfR$)A$u_Y`x2M}51c$Z&WJvr1R(<5u*E zp;1v|zh>^|X2wEi#D;qT1N%TfQ{|$RP;-zzdLOrz5S~63F((LqR~Tj3z7gUxfOJS; zzVnH@#nv~IMQ6+$4CQZPiV`@)|19=2{>+gaC5$NYege43{|8mU?DF)ZIvJh2i2iyX zw!Hx2BC@0QoYz$6w{?I$+k;lmIECO{6oeU4j2};OE0iMDV>ihO}-NW1|BB^7XMaKmRbq#2m=?+ALZb)~p{STeE103WNDQVFwoayxv^p9rh?LAY#8i{7-TmBwakf5; zI2cPdhRjJsEY?yrNTngTucBCCv{&-DsL~pwd5yS?UkRHSMp%hWRzZ! 
zC(SS(nGs`7$y5wqM!eu;rlCO&G-3VRe(s2I%x&YzW^T>diZhFRl)1gj%&p8tepDht z_MAjLcCtGp?pxh~2T`}3!DNATn4#Q=JA?7$a&(i4fA0|REb@J}NJ z87DpibMi^b$xypL_|t95$)F3e^gEN?QFT?_OA(9IfyVhL>ky;Qtb-mi#T{8H-6;?@ zApt19j6q*A#Vtp+n;k6@OyLT}1(~~^ap{-<7+3#wid#2@3?ihH{|^GRi$(0sn&kz> zwx-xW`L>wJ1HgO~fOI=$SIXUj`KYV+cVqE91KmwB^JYuSCxnplfO94$o=iVoogS+b za0P1WX2@H}zz@iS$;yl5lOS-5;C%+2@yFz!_K43!Fnig%H3k^Zh>b+0{(#ut9Mh|& z^gBBWK*$|c%#-H>98af5htii4X)ZcEGE!R0*YED{?isH$j7Z-|T`VN~C>j}OjtyZ}pjOb1wwZs(qR?zv|@+{K|#KYBQOnu|0p+!uiclO@zm zF;1r3xsM1bs6?O~?D?rNGQE-+FF_o7AX9JznfJ!0dhn!RLh2w8S_Fcy8DuO-K$pE# zS%}~yz&M0DBvnclY|6N)B?+ROv1Lj3IQSO<3h@&&-_Z!;(=Hn^DM!fQCQ;CV2uf zh5Jm!JT*WF-=u{JNDIcXx5}1Vz z4`kG$)J(!OBBOC*bi(mkg;xGWjfcfR|R_FSa-iOA1F>?rJ7k{(m10BjbdhtK!nC5 zvBEGq2l57OA}B6^H#k=)I|Y*sQOi#S?3l6JHD@)Uej*S{jB|S(DIi9mChQfMWB|^t zlgNnNAZ7_6ZJzm}gNO|guxwANeS6Zs8Rzz|O}8y+vL@Xw3k~_^yl}zQ2Mew~Sa9=# z%TAXxg_ned7l(x#g2D|!sl~x%jWCRzoTbINv_zqESA?aOhou^WQjNjq#>B&{@qn?m z08!i4FZ`W=ufYFZxcPTC6D=&{8t97OA~Z-?+8IA_hd>()+2cVVQs%NoJ^(+)Qw_a> ziDl=Or5_W5(YCh>=t6Dqww1eewzbOz;zogoE z>psh%Cnld{p7S89z~}%Bij!j!oW>76>nZ0{QL`L%9O}Vk`jX?^ppobf4--#PGS1jB<=X>XD=+dQKA+vxB-^R5g`70xg=jg=Id( zJkWV0Td!_(I@Or@XY)#J+8v0}zfqhW*`{-W4UmbzRH1O!6=&jnAfo%#fi5O_f{RZZ zOD36=;iH9l)JV;7Mu^K+C8i94p+*;d#R=|!ksY)!fG>iKC>Vi(NRhw1&JHzSME7sge6=f9jUqxe@10CdA~@9<9L@=dY-j@% zq=Xe3CDf=;uN&ZYB|FIG6A9FxZBzo|W{gUN<=@e$M7)-bic_x8q|JhFbm+voCO6o`Wk|4Ic7ofb^M9H* zU3IeSy4&(Yom+eTW>%|YRzvHZtzo8w%rdZz!MHxOxBFW%!MKxSturxZ9HNix<6e$3 zRF+Il7`Y)ZM*0$MmxSNTlgnWC?64*yW(*J02JD6U^i$o=doLG8@fWbC1;YEn8nuB* z)?Hw)GLt|aMW({(xIF7cr(*Ay&@Y^7^k!e33bzZ8%_%>jdz|K`<#yirun-A-^l8{t zCiIP`xjie|KEkj3Jh;L@73hEdoL+sJJHnpZq7$dPeGg%3s#q9FL4b5ZsAtBeqV)n3 z#zHcj$DKh!>i``3#S7#Fl?-my@{O^_)4w|1JzX|Q6e3_D-x?(wZWowgbX*64C4+nd z2skS1jujA+N@{@-W1sS3wWbEL4$P63eHFz-w4p2enK=+vS1%czFgVhRmf z$eNMP!;~0|36NzT$P#m=FcIJFtO-eipbIMUrmVlke2W^Aup{!P%6ZHzH+=EU_Jz)p zgs> z$jTR+>#ycPn65Q#bf-iu;t?5R)91RcO2*bN&UKHGjID>B=XM)a?#vKn_@}vI$jg-d z0vd+G{r#8$ii{SifaPqJlaS@yF4JaqF(HGd=hhV{&fcU;t+VSR5zaQ 
zcD^t{DcL>2gh-Hq3$ztGG%A+J63J?b7E>Z(hp<}gvhuLZsw+-g0k#Vbq!u7RzAk}) zy)y`G`7gIpH(cuG>ccK@PZ(21qLN@16IBirPlUceYx6{*99BwirP9dGD&?@; zMQ^`=ElzBKU<{0~24_1t^mbtk0icQkKpE{fdWbILQ(d4IW~WR`5D_u|f$9Ly@$fcA zk{Xz5%6X0W5CX#SX-_1IENo+0TWah=O&c&1@mgUwIar2w-i)6Iy#^@^alb5)K zhnLE7jmU8_DznuOYqKuIa3M}taxW8Ia`(D>sB~8>VdR7-%%g1e8P22qEEW#_d#GB) z(Y6SD+h+m9&ky+UU>@eMAA#`rbnQAob?eVv4E28I#&>mvfa0T{5647zIuos_ukBPv zR+0lO0-awZTz8J|>1n)qQ`SJjw~c<5stk%fY-yNZI=M0ekua{tctdjE;g zo*VOjSa)`L7B%ccTyLM~7L?J&|%uqw4?(z$F&s{|aL7;dZRBo>^sR8W+`kq>SzC3PyLavK#4mE+6%Ttz|OD);!8 zuazsT?!)2pM!CoGjf^)esKj{v!e#E5+WrAit*`Aro5|5Zli%gr+%Rh;mXQoSS|!0v#4+uXpug;Sr{!-Dc_6B1iK2~T}j5a1w6k5V#v+PA}@68VOqe)}6U)M&uZ zMmzAcN^YwxEx*l^k?E!b_kU(7}gIaF@Xigo@ z^-z9%-3!CBm3Zins{Dt5`Lh1q=b22Z0gvn2CU836dDBat4KRqN4HX!C6a9zBB90 zpU0c76kp77Y%mLFF2*KYf-N=bP*i#Teq~ftHH%)T10|ovS)-IQ+_UlKkU_dxdP))#VT*drLvx0a%e9S zYP92W5kutfq5)({Eb!zgt53bsE%_eGl&mt)+G^U7Wx74$yk{7*ZAIxkz1K{)^3Y&i zG53OfGTGSX=cjS68jp8^kvVS7hib_=-o)hmcVFmP+AYHz`n%<1{Dxecnn6H<4kgkm zyZ)HeFE3^%Ok2hht%}Wv{g|T)>al;bF0ue3^h%$3rCacQK&eG6OEgO1u2IuU2o!UQ zu43mxK*f3y|J9^yKmf#?a4WFW=eZI8o85`ybH-b;g868{G%%RlHzNLjGuCSOf=cnL zW66cl2Ua8e52zO=3cF1ND~~j5Tc#6x{9?0=*R-|z*NFdmR;v}6fCt6tiXTicD@Rty z+0h`-+P0QHi-4(gVNG$^#=5p){n$~hl~I*SV6EC@3@r0o+gklR!i2@b7T^C-Ep|3= z6wMY6f5mu3%S6K(=s}a%3`+IElkj!>N3ceIR6}emuTOG&{20Dstm&)l2Yv0SB{JuP zvMZ5Q>Nk`K#^Hy}qnR;BR96G7#*%bt`+Q^nWVo2!=4MLuH7 zgsSy1Q{2vu?D*wQaL`*`lJ2BaSECI1!OeEk8>hHkPJxs+3ys^6w-DZv$IT2JCRC0# z!dnC(c7qdMeaU#|??U~}=J`YV@y;AKsXv+T=I((9^X(-#)FMJgA^;a%g?>H^Xe;u~ z9s%@YNmsu=)lL1NQJdB-p5~Tde`q#|*BG~0&#Wm)rv2#UG=A^v_rCt?WVBLvUk(He zaIOiTXbIbX5L=xK6+K99;Gzu;WwyB*`q}nTY{!p)P3EIKM%IVu00G=Weh~8^z#Qx} zj`$M}v|3Bjv0#FM{tv>deKXh$pD1qsVD7pSP9|K}mY4`eSraI8>~!TwP?$>V7q51^ z_AWPO86gc|=6?jg9?()z&JnI=N2=4jbE>oQN3}RIGj>{RHdH568pADNePN_L;jyfj z;M`@Po#^h&rs6Ep`%Nd>NN0V)bOM!h)_<7p_9%ZU)SQFtk#O211}B+LryDkYr_%li zY&?RB3SE7zn_t=4*+KRJmoE;BrOEz!+_i3(no=}c0asW@AdAq~46+i0Gn(!g;m&ut zl8O;CuT6eF4-62k<)YUWGVTT&tGoHnJ7Nn|J0TPQ)NC9UrAScQ+4qrKKN`N2G zQ6s-}X1cktQsHf@^x4 
z$rR4bCyYmMxX|-wxeNM~V*A958mpLCAWqCkzA`##wpvF&-J@GsY^}&IJL`L{|FH;m zys;e#qMvYFK!s{p$<2nx>}#tOG5b=aST+$5Vv>#CL{ZQ5OujZ*Q&}%@Bn_}p>8P_@ zUwwnyn+34+2Dfszpkq*{&y7C>;hJuA_v!w>hDW+&78<6r>)oD1I$+U8q$xXKQTtB5 zJBy?Z52U3%b_+;tMf1s$nuS8KQenhP^w6-2CTf^Lx8gD-o`cKfBCHmCIUiXb+D>+Wu!#1Ye+vRiNmIeY0d z=eSjS_1qZ&%Vu$QyED{%SVv4--AOPlFQ8XJ-9HS|Ot+WMb$jjZU{e*jA$o~B)bvG6 z!>DCf_fLbA@+EAYzLtx5%R;oVQ2XQ;!Jc|VqV>4IC_L0Mq)SEJeSEq`?+o6P?( zu(ho+z4DeHxshm))aBy;fVXFe(5JF61l)$k9RU}`reOt*kRuT5$um-meyY$2c zS5>sLAK1u-d@eBN40&gRTagzk3-wbCKcJg>zxi(A2?LV7L?xOtQB=3t=SC};y~Xls zhcly0+>Zn@XT|)AK*?wtUu+^*WF&36bb;G-w}G#(U*Hx*e>gT#Jh7)=>@;^g9skIY z-uZji>k^!XWz(E3Nrpn0_T?eB-~WL=vr5-B210Dea!2zTNBbY@So+$>d#W+;i8F5 zE{{BAB8tsRR+%Ipa&%4S8MLp%8B=U%LiJoJ?0N^An0nqD2NI3~1e_GjLBLbOsCCv` z7U9=PafD3xOBpE>W|g6Bfv|CC>Nu2V=q$pEHnGz@+#t;dyUiHMXfHA;V#JuTI&y2V zuzQ}V4Eg+D*ug?ewT*tA&mcFD<@fiMT?*hwsWQW(fi!d zUAf-{YsYv61tsn&c3(*Q#muda$Og=Kf!=%vyvaFBu;lghZtca4OF#2~J7gE@$kBaz;MZu04$779L~kg0h~@gl|6qKC&q3)kNn>b)u3i5*=!?Ov1O5 z6lif#I#)jF7i}nl++wf6CL=wdNLIhdT4wsiwXI(p|3(zUicB=X9Lmzi1Gq>hUk{jb z4#`=4<(>5Nnzh;C5BUNKXc0Ud*G8T55J3qa)B_&EwEr>v^M~A|b&aln2+`wW{nSHl zncG0P&3;&9NN4NJcpCIOYu%wot;b=IXUqnc)AEaP`c)>XeJ+S@fSv)1M71|#WF8IV z`MZ{B%z$ONUbezbj&E#($!4h>nC!HX`A-mCyGjCcF@*Jn@TOch7cwLh)xO9FDMECx zF=?SKa3T!Pi`hdzFhRdFTlLkawXz<1K7u3GWBTMr%+P=HhFg>DW9I zE`RrcSClT%vqq^dmr!gMLHLw_(ED*9e5T3Z3{27$$28U?fnGRF41R&w^N>Qhs#&ZPq+vDFvRWi{{wN~l8ADB zejL=>5qH9qZdwL?t$2yrE>qlpqL;rfpinkxO*no6&td+5PSM!ACA2; zji8a$?f>CsNG@E%@y{TgD$>7w#;w|;6?#_?SlF45k_Eh4A{Q2%O)@))x?V_Pa1$NwD96 zQ9JB+CSA#n`<)Civ)@r9Yrn%<#O!xmGy9$1@~m5NzU*xehasuOVr|F1E}B;7(BUw& zFJXCup*fk12a<3-96_*A{>-R`@EN71_FeGOKe|0-Db4+(yU%{Xv=`%flFAipkzLXYwu z&OOq;?L+nz0=Kde> z%!P)sY{hNpKcYbw^I0u!GID?d4LXpN|J|A284Epk64Zi-vd_Dnj?SK52|CvS1E7Kn z<=ZY9%7XGJ7+M=OqD=PGAG-=lKUx2Jm3wHHX-pTKSRMur?6P?VD1fHenWopRax2KI z@%buuPiv`8uZDG+t&ds_BD|%qS`8IgqVHYp?q@C1@2_@`XA>Uwf;;T{)_L~Uapw#F z8|%Coc*vnhh_ak7e8=nwj7Cf!Wn<`gX6kPyb~$cUIBq)n=oj61ZS&5~OXghgzrKAi z*QUqeoSPok-ts| 
zlkM^iDPy&l3DJU6qMWZzddVfLoIgmWpB!W_A)omV=Y!W?at9o;({n+!Kv>SP>GJkO zVLLlgm4_$I<@&HSZr_V{@jOu1LfVz;c76&{X^vE)lSiX(j#O#!88&8}J07YSt2tCP zd|smTmNo8~nR9!sIqwC>Mmg^hs=;{`U(IT4Oo{-1$74_$-&-?}T-z`&U|L$tF;1{A z+QziVEM&L^Klp4d7$^z)GJH)%8xjibm)YXbZs0ig6&y4T(*g$Yj>Bux%Yd#Zor4YJ z_nfBEJHxsapMeaC3^F9BxUJ`Hl!K=N-R7cGvije$LqQl6gLxU|r>&3(A;Ph|CS}?% zk1xV0sk*<2xJ%9v9r@l(^s8xE>tL|VFM3278(ExU5`>XDd;zeI@C@H7P zy8Q9Z7VW>rmrL~duaVefhW^8AIKc}4@oC0IY4_tanB)3We%+4Hzs1h>m;?ag)~tmFmX>js%B29Tv)G-ID4@vSn2%?HVUR%%`+aJu zm4U6r2Ch}e4!@W4d$Uwepo{JDEHNzdwcMNRXP7PiwyE3@N>KOo#>!g@#D}W$n8vVm7D>tWKhYm^y>JseD0K~7SghdqW$rK;Rsdw}1 zggBj-Ffagp>JtP^l<(F`&5k)?3G3;%avwY`GHK+Boi|}6aNbU&ZPBcuj1+)i)&s}L zw^9ZTle9#2Bu|lkwRKBABQp-{D(tyEw#@@hm~}}$RO~DjEsmH;A)TO_K!AbglJhV{ z2U<%KnIv`6Dp?@p1FBri2JKAMfBmc5z1Acy3aV(79{D;<#-8KMHlu$yk)2+SN~oYQ zfs++Oq(G71S9%hhvYHKu{Fb0E9Gy!1XCv^0CWwl&#B^$Phx*PZaRn8&P@(CbKeYv6 zLR=^jDVGnfgrcCY0N2{8biP~VYY4Ut*i16hpvE<%A;tEk1Z!$%dWnAHZ=nA_^|yZ` zd_pVJj2BmtznMw}G1i-LFcHpdw)lb8R;J6;XJ(69z#+uJA}6AVB&!4BP{soBz@*>a zSiuZ`diaf0RF??8nV$IunRW!5GF*A|^)*hx_sWz0jUaB1{~z*<<`qbh_GsTV3_JWo z1#z{7NY~B!9)w+x!18wBBr_atJ0`?Cd~MdEzBeo(6rn+iKsgr5GbqA+a*yZK{pOqM zNVxEqhPUCu7hDVIM(tvGHDgSu5o-(hS6J#m=WOXR-d~O}9s_S(V}97Q}50+u*<>0Vcrm zjU8GLVpAPiAJBP**jhkPY8m3z>_#?ogUF=VS!}AUl?*jlkrzp4Yq{4IOQ6p)Ek@`rve?wIo+N#|N zRasb=O;ti-T!=SAJ5~91oyg=$bm}ekhsCBcs3KT!tJwyZbg02B0}igZ`V}RWarF~g zY~V4EThFi;RDlkgDqj0e z`*?fM9!QvF6eEZw0V>(QQ4cy0$+z5nx;4N!8Jjv|*Ox?dC>mo)oJ7To_1L%FekBWw zgjq_Wd}JUEMS9Dl`EITd#vzv&Au5-;zM^lNj3lLL$omS^nD+4FrK7qHy}gX_#qa4lXUza?$EsLu|=TV4SLK! 
z+$Y*@^46pMysc^AdQ@2U{gd>`AGyy`Ro4xss$(}~sv1NsjT_v)C>8&EgS(8&b^qg@ zm|HK_^t$`-it@P|-9a$Pdu@~~p!)cYZn``}hXrWjRV$|D^x#Q-<3_h%sm!0ruq&}G z;{xu#Y$UPN5?%f=1Ab7S|FL@*<8J)eJ)ujGgMB91mdr#TewBD-T5E~2w9cN8&g}{N z3_Np8qC;VbmaiKpW=QOOg>mLP10Bn3^z7ddsd zJxcz`$72ZvX&wN;C0}^t?Icv$I9%m%CY<7p{jE*={^sAfQp53N~ zmMqgP;e2RTXh$^6s++Gz#c>VQzVk~5InB)p;c3JVlv6m_;9^-nySn`i3N{)xKm zukYxo|8y^}y?W9#$C>ICbc#@biu`{5wp%83d^Ezp`EPaRQDyj*k-^UYkTc{^|Dz8` zRC`la4SoFE>nC@tp}&83*v^fXW!Hj!$pHKGFaPCUV!xQs@7$_f{pG*hr^>Dtj1}Rz zL9Sfk{fSYjU-*>XUtO#>db#lfFRd`+@%L7p}Hm)M~4HClu+Stq_@8^qX4=n|Z7LdMjVv zs{4QGUQ-bq=OiTPTIMeyB$m7+7TL{Kf9amoIYYb?1`i~DP#suMr$>BcsP}1Kk*@Zy z`r)tKBeQB+%)(0e4R)rcim%;qSv3g}Zm((5*QTZgU%NlEuGOX6u&qpRIJIyTz8!Z9 z%hpfXs%PhW63#=!WgKX=lpxozp;+cCJ+_nT!2m0wN=WW&+Yk**(!Y+Xr*+FWlJOYpc&`1OX8XGZqh?WR9>Yv>Om(+j)u+ao)h1mRQ)AeV{v1<#v$K5}Q#(va z)=bL4By?3fg!T1#s+^kd&NJ=2m8WW`*^8?ll}&sMi$SQkF+~p4#+-aROZ3cgeQaFy zqRn5$)i^Gz;-=~CadptY#Rk)Ko!PQ7({fcl@c2N#n6E^W?k8EXN)!~CQJq#~3f32?gR<~b1RU+_@_&ldKy3Odi%qAm zC|3QwNkkVWs08wdBDy1jgaYEBHWjP1%=VkbYESQa1KIilJ|SMa2njeK>B zQ>`q(Jv*xkq_JZGvxb?h>8bMO|ebt3Ua5*Ro;6ZIAi|he?14 z|7Dc5SIEJ@k{}Dn5n!~4d{}35$hNB^gNhPWsci!0Uflr7pKVY&xYY z-LppM-*!{o^S=^X++3%wq-kL{RcOWTy&1j5zIs(!ou=KSns2|kxarZPidxn<{dh{9 zTeb}0g)nXtNrIgt%rPe-=R-X>t!CMOeOEu1R`*z6>WMu-yf5{<9_n@bv0^>$SSZDm zp6W>V8GI-ixmYY37S@4zdABE2XtFNpr3Rjr+2mt}hxS(^lng|Av3}8{z#Fi{L*R@GqL)ZVIJ+xymIDwS^O4S*Ku*L$le)^vSRAN5nFWmz9JXz(A{ z9ErGTqQ(jn=-jRLwb3LPe$6~sr1pzCzAe4O0-P@vYp<_*)>_rHwy!$LirxGHV2|m) zUGEICZ~LI>qCM0hmes76?WsOFFcUG0*s!9XPRoHg8AMf-q9Vcw-i{*@OuiJ4__MhY%bDO{Vcsv!e%~-tzhA8;#kR)~UaZp- z2B=B)k_CFx096sIBgHe%iUz9vD!0dQBaHe>7KC+Q$<0<+%N%&Lvj(as36kD@klMd+ z1o=L&vX;Y8PSb^h)E-uB;R9G7_0VsQQkUr5z13H-H@`&?x_9v47yGE=n6T&eQGc~p zEYo)lhM2vrUmFaF9@SMtK>D}!$RX_9EA;P%s3Cnu989z;w>omKjWIGpJ2HKg`6DFL zhXDz>dq4`OS-&xah4sAdH|HW8;WOuD{C4egptE<)1W-bvru5PmLep4Se0z`cDEq%iX z)zeuy;~FQWB$ZG(*@G6oq@Nw3YV7A8(qHq|es+^i?XN2Bb=T>U`>P)ILxuXB{ndxk z*CR%9&-K!gs;gXY9I39AYnKD4J-AltpC4deHr;Rl^J0H;qyF0{wXR}TV9F_-m)yyg 
zzhphiv4rz`z1mkJ_FZU(u`VZ_FC-ye9kEhzyVgIz;6^$h5o8y+k)vyc+$1ZO{(4bC zO6k2ut2rDno*S)B8x~0^HoP)9Kb~j`qPLJDfkP~`PM*i8TVkT1){y5$gEdy4cA%<> zUzZSAHsjbq6{YKZ>eAQHrsfmN4dCF^m3 z9FN)g+GvU>V7hXQ+Q%wuI%W)Dwf^38%^|AN68J1URLv=CeU^deWfYn0x_BD!?wG?= zAA7^IO;;YK8ZGLChl zfH^fstDe!>-%M{h{b)78wpZMySB+JJAi>+ls>6z!#qqFl6Kg8%Jf?p#P7Uh1l1E}y zA2;GKLRT_5E#UC4rNq!^FZM#lS#@6$y;0lX`k z2K)}<$?A@{Zrq`aXE*hBCqU?k8Hq(OwinQ7yk@~A;+kr%?USE2KIu@R64ohThfRGZpn$-=Zg;t@fw|+Z|5xKcgdqWQ>2wvtz*+8*ze)_Keu0j0^}8 zT5ERnq%r=C_>tk3gs#fWV(?i`I)`bK6w0|)B*%`zhY35vGk9NK<%sn<+gdt*kx%j+ zKZ2G=AMkZ$*+mjNogAmK`9;kqHBs5O*fAwuBMU0mIXxL2XqC6?BIBk^GTJZd)R0Us z=n^1CFeP`j)FqpLradBku>%Cnogy#51;H_CQTUo1VgUf=zYvsmpw+is3ln2G7aa+?M- zpVuQ6GC<+PE}xsp7#vJSo0v8#z8>^#9=Zro7C3K~?ueHmo}3y;b=&czffhR;%Hj?U zLOQ4ujVQ9ronxdxsL_&@wn;BJTa7@8s$X7Ss`U1=RlZoO4tNZ zvYTVN74JHhgnI3mOgvIzf#QjEAPrlKG>Z{KudyjdH)r7wPeAQjFf-jw~(pB5y znS+W1pcC%H_HUQ-Q-UK}(FJONP4>Ux7pR_${mctg=Mt2&@qyMsCb~FmwZ8fSRh_dv zMvADW3skD8F7GGwB^PqeGzbYOP_v`+7GDAyGRWKDz9Fo4&+_u z@#;KIyH|~;o2&GK@v65yxkNudUiFgemhtdhAHSuY32Ia>8XcJ0<0mMMKIY>Is((1& zt{!l)+LumVa4~EgU(dW)4L2&EJZGhT@nY4-dR%{gG2^(Wsrx1B@~GXqxM|L%>g{Oh zJ!S?#atT0hJNM@4d8PSjJ#ubEUenUc)eNgJ`^=>eyn?!x>IqjMH+xuLcZE9Jo>r_s zxk4Q**P35)u9~dR`z7HGC+kIA`kBL?P^DM^mB65feM1gz%YhToBFq6$mtLuk3>za5 zI|n>{(UtHT^Y!&tQtN#E#Fc7~@I5}9@_x&ez-mgduJ{$$eYZa8SA4!oUnQ5Ndj7AZ zi%rk`N?iko@~l4mH}K6f|JXG7H}Zf|!ocY6S24%y^~5^m)_$A|`-p^oGh0<01yMYt zo+MZm@+Z0r(%?Q)ltO(KzLG^`roFWw1)t^j9B($_WLz#1k})zoGK8;%MIIIzEDwf7 z_TggGB7@|9C?Vsp@9t~Y^0SzJ%$hhgJtD`Dx}4!wp-@IR^%G@f znvGDB2FGDYYNBUSGGG?tek+lqu-Bnq7wc<(rz&eNluc5)73yY%vgC}< zuZ~ut=lDl^vF~3n*}nll#135foXb-AhN>)8%JuDISUPuGuWGxDm~L1Wg)f zt%R}<=r{m@O*7j!5*5ZQsKyn!O4#Su^f9%nLT{~Ao${_RU0kOp)vBt|YbouAZ#8YNQxhz^rCv{%3~RlrUN4xe{)`mkqABXP z2(~|*qQ-fPVHo?`g-IwOU}W;(>DK5Yr>YmASzV^7A%j;(#3wH!Nr6X*Au^HdoEeD< zQ*Qz$%Dy1MSffalrc6^k@}_evaHa){$T)uFrI3r6det=bke7)@UM6iCy*UC4ee>1o zmz^@nS*mL66{6p3Y;3B!2I*Bo1T{!8DpCEMGhLly-JqYFt|k=RW |6(?_ue(E|^ zrjNZA5!+n-^0n$Fq<81eP*=O)?@}l|`!F0Un^B=NUw<0dMol3P6 
zE=Y`C!4smNynUTIV&JVWa=udjR4$^w>!90v;}zbO`Ok8ZcaKDgPg(k>(xcIlKRFAj z;ynG;Ea>PYJ?Xj9-Z9xYA1l>oJz09RzT+l!CHqX-%}B7nY?^p8@@)I%uk?c1>azSb zkz+WSa%j-q=BTvwMUy{A6P7&J^mK; z500hdhgVeTpWUh^0)f_BRrgNUH3e$Pt8=WVkgPgOm)@pE*M6NBtkpL5{tKUSTpc1&lVby|6WbZkr1aYmp3Fd`Rx%by zwrhU%vpIej@&%{am|rC)0>Qi(hyRdBNhcsr40&BKY@-mg5Cl(TQXLor9LinD8=+`< z&Z>+gBlGG~UWw*3&v_!hO9;|TzcSxk$pc^;6K+%1YqNgwc9ra0mnY3aBN&F?cbgnL zV=_&-cc=^cezFlHtMp&tB1m?uWyvHuZFUJ9Z9uGBV|v_ECA~rCC+<)e29^SS?o_G3 zQh<^6g`Ezj00!_ICdBMs`usap`aotnM>d

1u^7YvEnM{)*Q{oD5uqwJ@-zINZC z_8`7y_JvkW&ezFfl~-FG83oPbgMQI{Yodn=5(5zw@dl6NoF|5z)Olh%ubdbc<{xT-1 zZ33;o4#gBn1igk86;#S$JbUMJ@1^_Zc;V0|XS}nBuCg`@q$-+vZ@wCBMi6| zT<1yddf>@kl&fbiP-%{1cogU-7C^;v_4^Cd$Xat;GFY6D0~(kZY#)*W3Yv;>gap;9$;4Q8)`*&`^%x;bOIc zF!$uZO$8@AHcB(la-NcP6VfUnHe`~@^#i|Gt83+ahUpiL`cwI7dTsmLK(bQlw(Wn) zPh_p~Q&ufTeDagE?YEFFE4WLNzmf?xXG44;_yATdx-SD^41>(YK9cuL%%QxKz_J*m zkK$)-9sga5Z7Rh@Y{9%CZy0J{927d03x!VPVuRD9XXO{0FuaZJ&(0X@*gj`I(vul+ zP>}cB_LuNzW_tn++kc-2^Rga{mZocI8>1WPuRT*Lg<*oVu}80m)U1&QnZ1^gz(D~e z$-@C~?S*nzpSfGd9rvRq&^h@5Zpd&$P^{;zY41{e=j3-zjr}QCzZMzI!}9%9EG;5_ ze`p#vEl5X=aUUFPmBd%Jky+cuxYHKdfU}B6!Wwnw0G2y_EfnS3(oNQOs>KTG3)E)6ZCi+xwfY! zAw{O#(?hb2$S?Br2`0#x8rpwyLe4y`g_{0_YBRuDA-@6c!(o%tgZn71t8@+9Wpf>F zJs-YbiSAGWM9S?}LTR%Ojqt=4d7!`khPe)Llubx=1`k)W<6E+N^@X$lGR_(j&+mIB!ec%gOaec;|lueFS5?=~z7+YMubA+_@oN=8p`nV)DO3rM2HzYiq|^ z&5(o!tc^PJX%)VhdSMV4a1;WTgCh0qOzf@iHus&@sym=iM%D>-v{Z8X;Z9@{H!ksvHltoW~m1YTGVFJ6AbQU=ubA}#htI=JQ?a6PyD$2_GL)yxIH=7tBn4h7@lqKSPRF$Ip=BEzAiQbS((a z?~vTI5x{jqRK+|fB!D4dTf<{nTPx*Cubbrxf!hW$329r;HAMbs3-1K^Z|9n!J2lb8 zQbLxRpkH0?cml}*0myGC!nSM5_f9gkD!VQ%{# zRcD^8&+R~qT61fnTPZA*ffJWtH2w@?uDx9D#AuwW-y9q7cgA&2cE;r~5^%D(sEJzMW?}QP1B}@wNlQ8e(7=qzVfKNy%hLVWYGS z>CfsBlWBUywXH{UUcB0&TcP(qSR3{Z@Bbl2=MR*4#DKDT`hXPb*zK|;Ke0rDh zqrPwpB9Rs5^CRKwpq|q$(E(xZ&u<4M5jhk_nF=%78S_p!KqL=Fm}{zlb8D+&o~Z&) z$Ecylk`YxtT^zQ??%ww4V)JPspGte;lptRknHSf_tS^_FFOh?rFWIx2<;#|^PJ|t8 zb+(u<3CSK!_75s$=V$zx!DfAm7=ytQwauuz?!RB{l?_P|v569G%l*g~fvEEUHZa@| zen1T?Y-7@pp6eTLh?jM0l~%TftswMpKGL%uz}`*idmm84rGkGvppHa@F}N|%qy4lI zr6-T;8dX>C&*WFFj-=(>Xh$sJvZd(<8?g@iNark5$7S~nYHa3s{4y+GKGOBe)S&)Z zC;(xMnv(@<$xQCec#H`}h{zmQM}Da{E>l0T)x~<$gQ|;MPkj*BZz#|Qd|2K!0g{Ta zE#hp@PduoWTO0J{52?EHH(W~F1UD0;u_rj@-g4g1g%4voutC>9to|rbvK5&)~1X8ZtD%Y#}lak*6Z0%sN=b8ctYI=5a+HCse^uOg*qcz zL*r`0AJ_~+B=Zoayu8FQ5{^uLu%m5h_W+FCw z&KG)k3nK6@^!yg}YFXW;83z55_AzZXM9$;-+qG&^Q^E7uPmMv?mc|M!uj% zV&iz#3+l0&hbCjaUGEhrK?EUUQ9td^z5ZJBu!D@}<99iJ5AdJl;@qS!c~PBN^9ox{Mdtnps?APZk1R&U@6@mh-I6c?q?_UGJItJ~8z*?fnvp0_*0c 
zv1_m`RV2L19nG&(c%Bae1Hr1N#1L~SEHo7t98 zm`22tM!QX2`wzYcpIVN&J%-wY1?5GkAZq9&Ctf)Ke2WN51lf7uf;>lnK~S*En!4_x zH@>A_vmVmVysfUXH#F%{e^+H>{L}Ntb}o;7-NX_p)=!V^JWJ=bs-l|r76FbMJ4|;s z|DFdqor41%rn}t#+dZ6&U*r!8q$64(E05}lt?D8BtE+YIchso()z`4Sb;2d%0YuXJ z{&&>=v3g9L9LVE4=m&c0-tV%0ZqSFls|Nk##zGlDe+FQIX2|xDoXAEP_oa*+>-#!& z$S(s6I!i?OFdxFCo^15n$(%qPg51NxR4@WbSoJPimFM)o-c{F?rbH2HK0j8fcmA$# zd=G?bZCd)C%8k{&^?88h;CP7ToRb&6%nTOzS}xQ0Q^_YZu-iPso|P$K!c)84U*v}skeWOUCi8Xrs!UqRIjmpc?M`h z_X*e@0#!$Ts&Ctbq17w* zHx2wmoo+>6*gjd;Z&s@cE~Hc-HoQrnzeUZpKWoz4x2QL2FAXOqa8S!}G+FTsE{rLP z%XVylb0wwg5C}m7jVOX%v5f^}3uZN|OrA+gEC8J@@^g3*Q%KmGJm<6T7OF)_F{UVn zr3)<~oIzKPFd*=TLri7d+2YOy6TW{59yLW~*o&>ATmTVubY7ZFU%(91Ly{v~B_~!c zIVNGIEus)oW=kofg~^iW1fDqB?%u`S7gon)pF`FHqih5ffj=FJT6iH|!reGu(no(i zBY`3rhG>OWpQ_vJ^KpZ2U%>-y)SrKaQhG{>?))_>S*~Ng zR;QDRO8?<&HL+`35P(cw4bTJ;14+tRz0WqWJucBd-KMUwr~C@ zpzUxIU+L?&tFB4RrHGRxoU`mgkW;8EQxz+vvD&m8bu{9WBu}`iO7Ubn9#V z+PCOJzt&s7#k}&CIY)WaIcJ@BE+&z_<#k2*e1_$vb8UKZwVrBuKgV}5m1rCE^jvAt z_)pxU3vF*qcPyXUMj*S5EJ*wY?kScbIN%%hj`TJf-)Ede8CZ z>!|cW&&cu4CNkm2IbJ1~{9LcH%d_pko}NeWNNF0Ykpec^k-6S4E1#0QQkljZ1s09G zPVm7~dQGm^9sADBxn2dAM9k}H&Cvs6Je;GCiJ2~481sH>&(6~qbaQHQpU$yzYl&Q2 zRaF}iGG>WlBu8k0$bCb-hmnMfthMbPvLZ6r%)Q8~aR%(mR6zbgl8kUX5RHeG%0e~5 z?o2UR_dIEhNJaX(E33NK+M{fQq=C{jqf0r&Sfg?v44b@evUPht;&HKQa$C$Hvr(;G43&IMH#r#Kgpi+H}jO z#p=k#;&)da=?OE$wnoOKq+Mx4!1+j>3tHqMKt3>r8$Lrb>}v5e^XS z_ts^@A`!nrSSe~ur%i*k*iRTRe)+tx-GJ8Ac<-r8PONydo@@s z+?DU`)#g4VB6(KW7r5!=gGS%zukt;-@DsXc!aJ1Zb4kM6FBTZhiOZeR%?Ynt!G`=) z9$URJYWXPP356;s;L@zC3%oHh2@Z=!JRdBxuFB4agU zOzFiy`N@Ecg-9WO?~SC`PNu^ep44C;4|S7mRD2iEhAj`_UV@s3vF zRKOvc-O|(#o)8l=UFr?B()y%Q@5F(Rkp-?gvf2Xv!M-L;AzXFLnN$+`Ip+EEL=4>5 z9)x#6YpK_x+jiqkEy}xk-Uy)!%0TOKoo{rilQ+PgRHDaq@}AA~(15+#^+qy~i(RkB z$d;Y)fF=HY_riND;EyxgE5LkNbY-K6eAE+^Wrs6#AmZ<@Uv#|%*n>_~L|Xq?-=w_$ z)-3(B0`4t(y#nr97kJ+IoX;>w)Z3q`9Nl#LHJt`q_8p(;SEhH`%fA0JoqH|U$3N5i zUfXF-(|>v1!+Ejow_`)T-_7-X2iT9_adYb13Vmf)?*M$~mUs1%IdyqhAg$}_O}76~ z*mPz$plm&>FYWIA9E#c6-Aqa%=?&|8Yay`A!ywFt<3^tqLMTMQvy$N0WIZM6Efv$R 
zy;I&PVnK9c3R*i^|1;&yM-{cKuh&`Mm-Y&+x0;rxy%`qU%Y;3=0eWIj?;!f~Xispl zPXD#1H;T^a@?PF;c*Z^4%bQs7>>Q%IWl4?{E4vZd&2l}mH+@{FFYN7oj0XIKKHf1P z`Q3fIGif)kuXj(-ZV#G#y{|VXY;so5R{RG8zOpqjAsu5CB7`QD9h2UIK!=43Sdx2s z-D*1xwa2pCaXC?FC2K2YWIlz?yWCubCMcoVkjWb^a=4g?Coylg z_%PKOa_2-1=m4IRRB*pM%HjWfeMf(oBB zdeEOZ{SiKq*J5C+E~d8zis?ikT_aWpfv1E@Y^H${6zS{+)R!-H%3*qI+{~Kxt39_? zd;-PRlp&cR;K^+>3evq(9!Epq#TPiXNklZ@RF{C&0 zD-$V`V756Si78F6Be)o}Rj?3AX(^p2Ta<*~I7~{er6irh%?7joLBf4=V(n`a2MJL; ze4yN{Nr+)Axh}En>d%<_li@*Z4>sqECXzTux@v$|F>q_XVHV_(A$<}pD;!#$NDz=7 znq_PNQJ1erRD0!wYtp|P;B_vSL5JgSZXbVhLN6WQrE77VpeJTD0Z_0mAOu%1cp1VN zFqLtJt}29~i?9V7sq zZ4B2Ya=NHxcLo2A@27kdeDOyj8-@&ml$Ln+cKT?iaaG3%f!^Ji z(r`*;rX*kV2tBct=EB4$yVP(9$=`8O@EhOjXw0X$C~JG0j98W zWidG)MNMCrF~X3=i&*^fWh3x5_!dM3Giyt%Hw;kQdQls6BkTp^H%o0j+9JbLL2<;k zoikdHA|sp|b}Y~oJw_obn29e8XiQ_QjmFH32^=A?oCd}ltDU5X!)abLCfJ-6l3tTO z`RG&rRZKz%dcT@E5ehCxUgJ|}NK+jSCb`ZZ4M|HTam>3spi5EzLqti=8vV{*UiUpE z$lOi{&YDEh1n{qas6%F^ecPKEN3#sm)3^@?az;XUu z3`xnA1B7eZYUt3`5^0MDsw0bKo5DklUV>}7`w%a6;AW0pDdGqghzSkEOZNVJ0Le-e zSLuLb%=KIUWeo4O#b^l{7ud>$ER~)z#2b!0`spFwAac5F9^&m=TWDCSG6BatE94KP z3$0cgpMntyj1`m3H$+nk*{Xm|0c}#!w)CbGEJrjd9WMxYkSLjDNLHDuNV@PNIVqyT zrwRgA#TX7TLJo)t>cD5n2#q<1n;xg*;rbqwj0$SXE*2cvX4$d=MKD_|FVe)(HcU(s z3)Lkci0pBT=>Y%?U@tJh)Kpax8L?qHVe;5Nvr5i@mBj z>6kAWYlUoi@Q#6!-E4Wn2`v}wY&LaZ{!V$JtXMCb^2lK*6ts#GEC|Jet}lkHwNWpu z@%HHwcF07KK$d7AM;R7`-csY0*S1Lg>;uF<&-Ai|R@h^#Kn7_(HufJzy0XcD z)Fjc0%DI9T;l$;PMhh|7UFsnjQY(6WgU;*aic;-;xdP@B*`lT4#kFFDzz+;O*j{90 zOLEfYsBXSnjJlR|3XEKMb*@+^1eSp`zMQQ&&{qZlGGM$CQh75>BAbfvyR7XTl!)vt z#e(0(L7HBcI}#NFIy-fUve+ne;eKA+B74wY;R{3%FhG?B!UYM+ zhkNyuWPGU@_e>d`ygFGqbt+gX%b->3;od2O-o+8wd5}Mc$eNBWdg{PPr0xLcJ;gN= zIaPFs(WeiR-;v1X1DyADdW6^0`#^~e1fnePU>_?ont#qC`jin~SMNjfN>q+U(r8{i ztgj#8b?#r6rR>|^zf}}o1gy%^Z}#>dx&i$rTCmr3vZiZaohaxwN~`-Tp0pqOLce-{ z#UT6nFLdq$6@#LmelcC&o8RquZ8uizy=UVMS3BbeMk2!_#eQxUHjIwtGOqJtDl>){b$Wv z3@psuKkk!nVPE|i?>0U?RqhH4d^i~Y!x!r0j@+8^?tvGk$X%kq&*Sc|E6rU#NkO=~ zIk<}#_zCVBgFCYF$-CQ62yU_Z&EfX`w@ib0#!F|G26s8Zr+@g^yo&~3zSd&y(D}-j 
zA6$z^y)dz+GhaS8MZc3OajUD1`VIV&G)Jr`%b&L@Ff=xPjBIra+wLQJOug3wQOHI0 z-d=^7=ocu{rs=!uy@PG8o9ey6>H2AbDjzBc7MXCiVmOi%?IA2cj`Ny6XqJbU@-%(H zEbkoDlB;HU4|4h4P3+V?^#eB{#=}8(l3h}!t8VtH<>rW+y+07UYs<~v{&)V=Xk^HFYnd<9B*L7BYDo_B4!ecZoD4(CBze5xW0CdH#mBP zWohf7#+lYe*m{9qdzs%|e=x^8kq`|d=OQ~>q0gA>9a8&WnXr3Uw`WYa?L%1R z;)F9(?q6okFqP-QWx6edf=#(+LReC&`;3Jmk> z0^eon0W#e>WZLlAbbS!=b!v&CI6yj$xJpP}9sCr1GDDy(saK!zLW!%lXzvQ^&-%i7 z-Xc-&m)^pR{8=A!i|56k5+flr0{W9Lat_x=Ps_QqsrRkkXbT`-cbnHG9vCDr1>B{W zu%)+o{j0qI1>ey~79Ia3F3w!9Abh%6o+kYF!>1qU86_p%%5EkEt_WX@X{>>w?Z1kC z<#z8}yERYua!N)eTZn5Ksw<=75{=k>s3^FFv5Go;HO(pME??j6l=QP(3iZoQNm5^W z2kNAS`i48a&hgcAAs`V%cT4Z^s;$51H}CNL{5Ni*L$=e>Nk8*(@oDjYK1wtYZuN{i zy~_A!Tk3+lrE>S$a&)}%ZVh+&k312)>!j;96?fV5)vtn=okD-CP}RwNh=$ij!|T8M z63nfX-|sH3I{wg#u%(~fA%lMw)~=wFYvm1?VsZB3r6Lz*}SMnEC2_V+)jLlC8BTHBms<|Gbw!`BBTKFvDf5ZSQ`H_rO z;s=gcF-ozhzHy;e-8nDoo(Kt=KQmAJ=Q|!jXQyNW=eYf|K^`_oJ=Nep6mb2w}aWmwg!X z^qEV%t_k3rV`#;z`kE!)!PXl6;u1*W8vV%gUcb!;-c{DWM1WXEN&;5 znX>eV2fTD8m=P)T0Fd#AFC=t(#7Kq-RJ8l29FA>jkuwsD`vU$Ru=0wbTKqv%&JFRBluo_shz-_2QJ^zoi_kgdW==#UA zo8+b^o3b;ToWusp0RKQ1~*#HHWZ^Fn|pXats3RHP!3_h@F2>&XUS>heUuxPKgDS>7R>3Lk0Rna z`lz^3+pp;9+XWltE41M=0M1=j3<0W9>a-|gN-;Xfv}I;kps5+wLX0L)F(2g*G|C1% zA3LzJG>D7r50h~igFoaDi3dLf%kTor?X}RXl=hy=DgvEyVazIVD+yIFB zzsnvdw2sm&EjM+s8O33mO@1_!!VdAtFBr=B3tz~BZ~SPs(&tW~Wj~r#LVgP>p-hA5 z;*VefzmsE$Ssg!hmY7wU(Y(0CY#CQPLNUInIEvvh2xuVCVAzLg;}Y}VNNgRu)ND+J z?c$=S_fm6J$uWbaWDh%t(m8iIy`2l6`ZUCy=9;zR4I z)=c#F0y9iqZq^L()bV2F<<;fp#H3Z(iep2|G?s=TWZuP;JK9{_`2)>dWtMxko#7rL zhrB~l)1A9~6k08UTrMVuVwE zGLiJ@I&J^Stp30NeO7=^BPYZPGuB#B09nOz9StL6SZ^?wZPaf)eE+|lq%rHwME%?W z8nXf(?p<_X1$Y%;_SXtCjbT=4rCIO4!0eNiHq2-x*xM-jX{A~FfdSoGX%0!sFM-&P zA^!lel&4C;3v=CpxjM=M%Lg_Zj)qD0`F{Xb;%X1D66w{|W>|FDC1p65^J&Q&k&G3l zbildL8>`PCjD8S=NL}b;&_Tw^u|44+9}Ymjha=%n z>&>RA?%^xZOEY%4_O-q2tSWdzRp5P{xWR_+zc-jQ8vM`u+O@#2?z)?4uIMB0r1cvR zbTNufZ!oJpFo}eX<~K+UFk_>+NiRlTgiU4~QqK(7WTrgh9wZuM7^@Ql(2`2_HDp@8 z{1`v}9Pae+eeE7h7K_3N+v4GL-)<%bArw=cFyR;Jo6YnG2G)DCSy>+yLfPBRXm96= 
z0t(q-%IIxDF-l}t`-1}-)cFM`HhGgB_9pkygSRaiv0WLYAqgL zHQEJ2hQ|rZwetMCez_LU)sHXNVrauI$nXp3?k?c-2};^+?#g`L78o*Y4M2J7>|C)- zLXEZH_$*?=(vD;NocJo3z1IqBAtYGf7O4=n@B>W=ws{CAc2WSmQR&wT{N2`<{E|x# zn3m!sVG=6QzxP0i-9RJum`R~oD)UBrwu>Ninbz(xrB*}-_LvnT`H(Nmboi8I6GZ;% zRQSA)>40G!QNI+EXU2ppk<7U4Jqv-k?CXi5mU(8|Oyp}+p*lWB5zKks&GqIi@vz+l zp((1t?x?|nJ*c~}hJyvcQQc)6Wew`a$LdwIHP;!CDYp&y< zy%Uf$=!IXy0y5o=xr3`=0kVUuT|)#f#}L&NlXV`cnlU8D+1%hlmgv=~F&|^AA7Fye z^CcSF-`(v3!R&x)ALB0t6jnU_aa0=n5E_d=TgJSc3rA4fy=H~bbzGbE3l8yKN1yLC zqib*DciX^=He#Y|B`e_VWa~l6T*mqe7KgvWAb1F;Y3E+ELFPFwWu?Ohx71;?^Vn7A z+w9KmzSnskn5?Pt0?#Wv=X)M(Ss8<$?RkarDV_%`4U6cIpq2S5LUSH3W--ut2tNjy zL8C?|hIR7THt+omOU}Y!qt}GJ!nrpm{DPp$fwX%~Xsq^U&OWm%vW9ls2YtJcKHF!u z(XP?9ec*Ou=;}T*yW?NMjDhJIOn~afDBfx1d;oZt3Gh}hm%<3favivXN?W8kt3XQI z;mOlE`eEp$sL#WmPv!}TfHQ^CR zR*329dMV940EvB7?%@M${98#02hB%d?tAf|`6JpAhs*@LtaHe048!2Nhs=6nxj(1w ztBs7}N{h%qm}C%=;jBGmJ^`a|$YBUN14AhNF!*FHEjw(sM=m(uBW7(BHaucJk2uG% zN6ZFMi~ZdeDxH@~Ll%imbmoZJ318Jes(StRQL_S`KI5lcnsC${jxnSigFv;C-aLj- zMpz|}nN^UdtKb;o4Rfi{ap)4c^zCu8PBeoBUUH-b2m#n2uK=~+IW}_X#BsAecy>&o z`8Xz{yeC)>Sb=q>`(S-P(1(R)+v+G&U-G^I79)yuiE4AXW*>xAXA!Vq>J-6kDk@Z> zK*$NREe7_?2@u|?H0lJ9VG|ucfk4M4lyVYa%%vVD&2&V_k2q;|M$O+&nmvp~e5pHc zMX+9HDpvSkr_|g(J%vVZ(O5i1u10D@l{v+m5-P5 zHrvI|#|d56^kGLraElIM7F$Zgg*K}x@i#=RFQx{+nJqNtpT^<4ZL zFnkkDJ!e*G175o%;)e^e8#L2YlGr7e8q4Bx#TaIq#C!W$yo)0foIK1FbXa@ zi?v@&XU>|>VBMOZGX>!C?{j8ryY&plmS4`9{n2vQ-_4HrnftpL8)tV=$gt)fm@;q= zpd9Go)bBv6uADbl;^opm%<@q{2H-4b!3QH_ zg_eB_7ydBoKY`h!Bn~CejyWF-?oxLqF?eem_E#x(nPPi}T|LcRJvEsfa(wp3UXFJz zfPrnIffo?LyNQCGweplQ;iOVFo^b7i z_CoNXbBp~Kyjm;mzu?gN!Tt-*tnv0=aAb|P|58pYBxJRp;KYJL=3H+-!5qs^2iDTU zt7hX^R}NRGvPj>`5;)7ljPx)r9P6u9MzX0If8v0~Ptg|7!S~cg zGu1w@n`!p}M5s~M%tG{8w|4X>4JZFkpNb z8A2T=#zpH>cTx2l=1&hck=yd7`JSfb(ND!@^LFdDd||-44v0_Rg;mgXYxn zF$g;+o&foABfd01ubj*6I_xCG)Ao`l!2Z|)YWWxF%TM(FUuH`!H+SVLl- z#b$EcF*E!N_OM`WY@w!ifGhpz^E<%Xt(0>I)CvUh4$$`}3cU+zwSpE>Ogd$3NkHbx 
zyXJ0fTW*(orc=`wZ6~@9l7EeUyKlattr^`;)Y8_D&JxwNb)y%E3fipE`$c7K@#y0sdTzqyIGK>oELq%r>A|JMU8I$PkK)_=+O$rf$eh3sn zQF|~rbqAs{6yrZ1BQJMHpmo6UafnE<2a{4}9O?{Wu#ra?$3a>UD(d+vIMN@XBEjDSukbLj>~DP*^U3V&t#m9* zYy**>Wr(Ns2}ei^7mr{STZW6K`uZdEwf(p~T)Yr@asymsno-#nnGtb|=*UJ9qN28j zUW^d+^^-?vLWFn}U!RB&e9wApq<9yrJ}go+!=)R0BE=&TMN1`3~y zQiUU<(CyFkYm|5!K9$Ezd=IV46g~8#f6^&a{AjnDNXvw%3)jd+A^Jy;97BdPfpOAB zBp{uEssks7O3^_7aSUyfYBm^8B2~4-j=w>{g*6l$Ey^K-UV5~s=&#Tu1ZeVHw0JkJ z6q<|)XQolK2s?rQGk&E%qDAM>KfQo5h-?ipZu=Mqs9g+3+bAbiRMiS7Emm~KQUV*1 z;KmnAIVe_iz)~KFsu2a0Y7&Pns7=p(HC}wA0f>bOA`Yt>1ec%11%^oQv7CNvmpYDvQtD&3QiF+_)wT9ybTwxrHTsFB}LSM(`!(Q@YNzbyUZ)8 zJnI@`w$juJVxUc;q5-&?6~(hA&U176j08kKsM-4^MdN;msTIWpq_}NbNpu8G46h^( zBW-%`RPiEtR~D5+eb`ap2jYS_bED86Y+7Y;LPsBARYV6J7~8#yc-4RIeC*{o^p#gd zyqGXq(Tl!HF>)?c+a7RrW;neNA2cbqLsb!}aT7h$#An>ZsWef|f5J>O5s4A#7)G0{!CT4J87 zn{G;nb>g0On+~aNOkk9(Eynq5^+G(`(%ND#&vHN=yFcH$BE?>mM6^|-uGo?^IfRwS zRK1Hm=pxmcB76W<1h-aCtkp8;Kt0h+-+YX$`f5WzS6^)RIT40?gnHB$M13lXAS$dS z9Q1PoP>%u{-%$Jo%pBcFl(V-_I67S0NbJDnV}lxtov{;fPcO%?f*glFxQY5S5&5o7 z6qXdOHc@W-reeB=hW~6Pj%t%=Z*#E%yJm<}th9H{HYVygf6f23!>;MtLafwW#4q+y zQ63wo*`sQ=JpHIx^N-uUT1)Y!f8iED+aCk|uBB-1KZX&DpU$@wkA@X)R!>_LXSxN19B5$rO=mKU;x7 z?c*Wp7-Dj3@sj^MwSB*G(EZjJ+w|6=GcL#~ZVje>nkuycho3-Q+lWWJ2j0%1sclr# zMQy~JsPueWQ6cb%8terGGti>8!t`Ty8Dh9N-`ch!%mz3z6EUoDAU#$86aYWjR(zo8 z=f={rPl#5I3%DQEDJiZ#MzHP)k&59@=_DFch4$h(u!k?(i%!v(cA`#2rq(Wh4^wMz zrq&nRgId!rO6>q%aFt%`Aij#8xC^zb{$uT(yQq0bRXeMr=oxhUC~7AvPC)C12uY4k zVyr!gm)wI>J1+i7QO|x~mj*v6l6VI5o)j_syZuQ~k$ zbTN)QQt5X28Uhv@Q{i`qlEab-#39yE%5@R7BUa)5WjOqjkV;wOa$|MwYh8rYpao<- zCFhb zQSp2j?Mat$5w>8&N1K(8-v^jg+*Q;}{=>%lNLLFB5kjsx@3P>BHi{yR3)HroNOO4jwXg#%`0V7g7z$89&zCf=)sZf!^u!uraS$)=uJ?SYdha}s5!>HKzQjXN1_$7z zYY3Z4mA}a4#bt*mU+{cS|9+vQ|6Zlu&nfkOwf+9Y3ndU5@e&qL9n*Z?JyhL2)Q;Ut znjG;8pz%R>VYtWp9FK<`+ia|Jjdxjh&v+ucarx}BiH|-CHJ>&Jb!)gl92jNFLJ&L zHaN}mI?wahZB#(8qAIV{6Mt8-|L%;x^#K|`nAESQ5L!dKU@ljxXa8Nme>brI_IVk9 z*SG)9;J@qGe|O`*>)L-0c-cL|B`=G}NF@M9*xR8TpPR&nIP|ip88KuWa|0IrZHDlb 
z4;9vtf@;44ne7jH{uNOZ8yRa=S>NZHTmCB;?`7A+T|9VKGm1z-yvnOWcvr{MBd?0e zk;U`SwyRTg=}a@OP$n0Y9t{^%M8V2e#eejxVRT}mh^u3(FcFLjy1kYC44Wxjh;VJ) zqDbDlU%+0AvoGibX|2Y`$OQe2Y9f|y%@URA!fPTt>cQT=)X*LF;wvmJe*C(q>Af9% zWd7@lk7V`6>V@|bF-Uuz)(hm!ZZD2r=_Nk&E=I+|ULuN(uQz&$2HxM^$)TFP1uLNV za;Nt!JeR4k0WYS!;jSRqNc|i6zX>h$Pip-pRGKrONH~;)D9K6StY5z=BK2!F6lmF- zqH)AASZY|9Eq2qaezf2%=ox=f{#)SLXXx@SLFttK=X2j$8Ja2G21c!+CMD?zv=koV9`Hg!5Xp%wYL1)s-#@=6Iw9RcN?(zhc`uBEtT}g_#-aF{ zn&AVGEt$=OkX>S`R8xN+tPK>NC6DwzWMH1RH`qRac`d@}%O{4gUg2uNZ2s!nVn_Ol z8kzqM6x?H(*41_{Lh`ecHM=TG*6e{9v2^j^nt>%C49*@N4S8RL1eY1sjQ2&gV4gn@ zJF*wl*kkWQRK8C)-xmpGt0sRS;;Wozj_+Qi2@!yk5+cS#!2S-Nh|?9CvoR`U8T1fzad+VRk8|P9KTKeHFU$pB?b|)Q^Aj7@EkCWdZ`%1LF<3lH22DwT6k1(yT7-|F5 z(R|&h_yKCN#MJITkm7II!pPYF2zDC$`XBKg?p2Q5 zz&`RdHUAWIQ=PNg(c@E5HPST{e-_Bf*+ZeppNf`Isz<~@r9s%VU34t0eJYauRS5_` zC4VM5#i?Q9fU%mpDhhMY#@C+BvCp8s&G}4(*S^qSJ{#m7`urSLr1*jdod};~-?kPCzDJ~BX9H_e5R?_so zl2Tv#{VNEMXXw^fqN%N-Gp;oMT2u{F9l1q@)2?r_zZTLTSOnl3F04i;xEb+l;qXUZ zaYeT2*w-SxY^|6yo;w@0H0K~(=;a|CL;P*9_xMK5Q-N~OHzH9VspSlW7_s&nXk+`m z)Zf>?5#fGG&Ljo0kp61cN@VKXU&KS7eW$-jjYwj^!n5h7Oq*OCtnV+(1PBVapA!tB zp;wlc1bT(k;Oq+zt`y`foK4sIi#I~gc{zu17zK!m!{|?M4G>jBfA?|@*PJ8V?`91U zZH+Tt&cOgJJUfN6#~7%o4-{36vtG`jW!^R!D29R6?;0o?>ywHoaFCb_HlI5P3g-a2 zJV?9*vt{SOkQ@fm2ZO=FMht?;kWfZsxIP%W?IwLZ1Wg<%qTC^>i5kN}D?S`5($LQI zp`u1m4T$_UAvsTlLq)ArrmM!U3Z1S+@)=`Z1DnHUIMb1aN$Ci%?lc-Esv$e$>%(B& z-j9^6qMYAABH7b9HO#bMu;wk{@)673>0lEPtKblDd3g|aPvJ_=<2 zC;e2Xt9&lMB?HL<(&f4Y*zyos*+x*fB@E=op7XD9&_Vgh<+%BkE%Ucwe1r zlE7_Z3&0SVwm(a-^qdHz9yub3@<{~SRyE#1rwLZo0-8ET)PkKCISpawWjosOF)+LC zrm(U2nM%#Zx@bSL57S3uVSO1)XHev~M=8555w`v~aIXn8Xq<2|Y<7=>A!P#H;GcDr zI37Iu52`<2q&2rUKLkCMmd_=E;Z`Ii^LaLelKz=RD@?$SMX{1h&QR7AQI58ZN5fm_ z@_13M<2pD}PzFhY&2!-1N+>agfZHK+0^1M}Gr)x$cTbYp^etbD0&*h67|!_hM=&mzqZQ_&YDilio!HDjp3 zr;x)qxr=)Y$Zc(uYJxYyLz`HICiE#9Wlx4{;O;~bRl4KAfBLHSBz!g5_0{{6L^QH} zs@*Vml8E)0su{zn@#Io7@|!H8v}}r-EaDzg`qAW)1{Qd#v zSP;ju!f%!AXtCWZxv`r%I5v;QW~s=VC5g67g@5B3U7afKFs>GT2kaa{0n^~9nn2a3 
z2`6y15C3eT#nZ5_hR`3=pe{|Iu<4>TWTR)Mi|2IGsO=|ctYA8L!V0=HUG#}pJ#zj_ z7gFiY!lk?oZ5H^+43QBuGt)UE6XZDau~>4>!usu>A!383XF8{qc^fkm%;H-*HVbyN zE;G^jDEfV-f(?)}o|epn0U(=p%@lDDlvJ2i@?rO>V0t}g2^mtVeWU{(Fsr1cO|t;P zZ*$R6FWvjV%^Yew2Zr6Nv~VtL6qDzO>iQ3RDQ}LL2Y|849IEIuqh%yg!;GfMKcKA~ zTJr|0Mk1_T4k|F2I9N}3{MkszUZXy zE2g9QnEE&Kl|5GJ>Ko^ah}K2WHeDxUusPfhg`D_-@g9!SED%0VjpC9TT<*YS$Z(q^ z#lR7pumA$%JnFDOJQlqjw7})6w|5T^lZA(VG=G68pD~7IdZ4Vc*ih=Ft1dXRGh1nQ zqcWY}X5tES?VVU4G@M-vflB?T;zCiQc0MF?W31us0OO403tf_{CdD%&M8c*R^$VTM zxjejAa<~)zuE7h%MTX;l7Kx;oak$|agDziJ@5^a;P)$KU1@}oVLAiE__Ae6Sx%#_{ zVTQj5urFGye22q-6bTghqp0J^VP==a%r26)E(RmLMkq`L`&#~^YU1FJVgwE{_Ff`7 z*jAfV=&>xp$1(D5q1{VFLXeUi@d7p^y0Jv`Ew5fUTo$G2y!8T-5;4L8#JpgKPcaeM zGjyqVIk^PFiWb}Hv2BA-gxx{pGSL>?=7nYO#`L3Saz#UZ&lVb;D=O%_w$Q3v;Mext zv$^2Y5gWaFb2<&DZ|e{!%Q%+kML|D_8d@?n`3b^7KE3pl=%=OAZ$Dufwo~8=@hPOo z{wqX#pYd1-YW5*G$n6y()PJ*AQa$8Unn2+zMJ;}+57(V~sz~py6p?{858fy6)s>al zcym{ZF!+TQ-GkJZzfxE}6Fm)vt`dF-y%(!Qxq$Sf7>JK`;4^ftf)`^ly}t_a0#oSj zD)FY+2fmJUP!Z6*BNOH~e9^f3%hwv8)>!q$OXkD(sx;QAi4 zmBz1w7ib(6t`n=+2N!-wL?&jt!x6p`jB;2p1qv;WpQWTS5IN(DtpOkN*S?vS4k}xea2@BwDad zbk#SEq~PsJm_>WF=)!hjz$Ch}U37Ka=02N|v0YikP+W%^?G$M)x7Mx*d*tZt4gU;W zaIS(g^j)I9fX^4ugjArFgSdj^ddO02Ov&xIOFXLuE@ue_E)N6-F4-+61ADscfrvSm ze%K>kk>fEDk5m-xlBG+ntAkqQiD&$FT*rnl@M3s0$7WZ6zLN)HJe?Zli{}B!aruDc zv>QxlIqpH#IQ|PbM#H@T=c*g{zG9irRbjziQA59p*e>@FUHenheV?7D-8p! 
zoqh;0CFAMXA?&K{RP8WilJWG)Vez3hgN_{*t?*Roi0B?UlX2smYFWkn@|DBmj;Qj` zLZJM;Bd`b0ri7y=oPv?pzy`qV z`3^UG1B|jaz@LQ(tw8YDm2;vgeQ{F!)3MyNZxC)n0LqW2mA{L2*zA~HY>J|{h<1lr+dUKI3&li(OHVY4 zx}FEG8&6s1MOFQaaWwC|(i88V2b~{J9sUsG9MhR1RNx(5o#HPDm%iH!w6D(v@iAoG z^A|*WJA^=)S1yXD>_`=Uns`w-{ntU7QsD(>F2bLop13UdlIViKzaEzm{{+kC-pjyC zWim_+SLzD}YhTFAx|MQb>?{Je1~)7c|HjW#S75-JLL;xJmz%DLz4YpzkW*$~ML5TI z^xB`|UOeowN`-?G<%g^B)D42{9~cve>`L_IHEf!5lynnov-g^iH6AV|j)}0v#3>Pf z6B8-=I#|X$GH$>E@ceZ+lVD9b2jg)pW9e+OTri?L&eNCIMfLwf<0n0hr$)i{a^`RC zw!0zf`ztRR2cHbNA!<2XR_V<*L{;BZB~@O(0U2mF&oV`y>PN{u+wM0-uvEZ-8e0Z9 zxOBGT9Z{Elx+&^~4$pKBgMJYU3BnuGxOWpMHjCO8izk`-PA&%0974|FJ3x)<#RAfz zs8MBro2-p8LDrN?!x=&EaAx~aaHr!k7q2AbX@tDg^)J!F{}(T`&KmBFzr??}*?WJ9 zN})qDorBBvoIx+$g7z|tKDmXMD0jEFZi#yTyDlT_E^mt`v0n3UgT+sztG7it{iK>f z9bZ7gn8=fF4ncCn9Z@NE8jLL}+});*N{q3?;HJ|rcSKKpyfJ#dY)}2~iWGB1rZd|$ zVl26Bm=ctHPqd-Hd*J=EDCVB%%@gc@53bWgEDUD;ZRl`L{2f{`2#lIQ&)yf^JOni9 zo_J)GG?BDk$R?m75Jb(Xfs}ctpDXJD}W9pI%Ewf0-YSPr@n9})b| z4J4DL6QT0iK-)wKE`gogE=>X&}*qqhyI!XH%5y>liQ?ea5qpl|gTrl3l?{ zPD9Qc3FGckQ#N)CWN1~Pl|z)!tBQXVG6D4WXP|KAIrwFv(b4cP7+V)%1JYf!N8#M_2QnrDt>E1Q$#Qx3 znmL=Yx^*YgsXu0G=L@BbZ8{ZcFx?2OL=v~k?_x9{Z#E%7PJ>|eDUz7G$ic2qN=%lC zlxoSU0q~L94N-EkjHiz+84>{3VCi>r@y`DW$AvO^TyM!o6CwYor1JJIgk_A=|3h#} z)q9o;Gp0~VwER$?Y*5x7VL^0)seo2Q%VemrN228$I-=>?#K`K{P9MkMtsN18!WA)c zhQ4VJ^@x=mjW#t_Hqg&pp(C-9?fC=aWU`Gb@H~{@%E~zT1f;@%c;Lu+sva*byQPZs zX1pAzkJ>}U@v6D{39^A>>J==1A{E9;js$RHhaWXelvN#b_MkAD?!?PR=w8)d&-GKv z)c-C?M*CI;Fm}+oB$+XIay`|>@EX-PR5u|Ym?;+$I;ywODqpBkp`BNaTw#wa9QH$$HkwHbz_$kvdQ0OksGJ4L=#;*ENzf{b}oZDCMSB>^j& zr_0c(Vi{~hvyY+tn{MK}5ZKM0VS72)V4qh31G_~RD#-F7yI`-jCu;oRHndliW`ga> zRfcQb`S&JzS5!?5s3^+^=h;o5KjV*w1kS`(k}n|wCbN!-VFcoxp z7QK}!D{6~rbgJx&QxSoc<$L<7-IQ5bnyk_1R0fV6+D&^a%UJ;6$5rI(K&Dewp|)bt0sRXr1clY(9NZUDnr!7_|2-SQK0Wr%aavt3A*xa`TpI))uQv zoVBz&T~HKov^xxEk@Ot#+y73_WSPA%CDF0nneWK1-WCm1QOuyaZ~ zsM+kH)s}T_l;$G@&1!?`yZom^YJ;OLpgpxEM~NXwF)h*4%<|H%dbcBc6&s#LmESBmmSy?Tdw`}^Z9-hHdN?$yJu 
zhO~*wI=g@>)sr)|g|w$0B!&e!_2skLBI;USzJ+%?>dSff`KW;$jvxPqay)*fH`NCy{Vn?Laha%3Sx6fn zmudR)g>>a{Sx#TLkjz%Hy*}pxz1#|7jYY#+NuiHhLNi;*7jd{Xptby$)}Ok!mTf?U ze`qax< zy8~!#TPc|q^fCNXyhq*}K=<3ql!vtuV7x z^IYnd;@mymP9wUA`pk%r$9RX$h)AX2tQ2+%bxvQ6bqKJ}Xb#}OqX0e*8;GO+^NiWlrL&S*|I-;nYzbg8RM3;How_j6rRLKV7!h)$9TN|_+qE?+<}O#jK4Q4;>ZB_wrqK!z`8epF2osfPoNr6H(*KNyPTf{kS%aB%SZ z!l2|()Y|9O3kTlNAT9~0r??4Z1QoA}Z!J9%LVs6r%>G2naNG;NAzE z1)$;2$>cVi8UZ^WO7T>6mDg7|{HeB`%07(+!Bp$QI*VVV;vB&ATwRieMDeM57AoTr zsT7Rwf+>GrL@b3r4+)s9T&htpr$Oiq5SFRN?Lw;gthU{qL#3NUfZGO{qGRDF|%gR%iCg;^lr7G%s&P{pZkPRI`Ph3i99 z0vvQs7>^KqB!S1*xZz!~`D_LbM|lURglPflCZs^4KVPJPucqMc4r4%&>SqaRsf)c; zBMb2%KhEV}fPFSM#HlxIpaLdHZT%xQV!F0UFTGv>0Va!M)yrwm3s{IE{t&lrAK`|8 z2nGC%+p&>_Iv~kts&C_82ky&S#|2s5HWDF88YlKf>p1T5AU5!Hk!@oD%2fhr1}@S; z+Zn!jAys;D?0Srqk9Ke4=$4r@;z+p6nGo$m^SjI7%vpRv3`;hE2|5kn1Koj~o&pOv z0ePnpVuj!l0MV(#!-aCBUITuYtix>k_Ikvv@;WFqMc-4rag7WQ3`y4+5bEY4BwuGZ zmHwcO`T|hGF<;bLhuXRmL8%I}eGFWXf-g3Cmu5iRg%erUQE@Z}Mga`#kf<|&jGPEF zl-C7sw&6}RV+lOD5t3H-kP#j7)d%;yj8m$~ect6Xg*__qI#atAJD3*OMAA;!4q+WV z6>%2QP~aJ6r4qJt_l1=3A|%G?^u&vhBOt23D66DiSJ|}zZ$CaT&o|ECbO@T@3@UyM zSKuuoq*l?17op^QLFq5a>UG`wz)ukl%m?2ZS%K&e7f-$BBE^0x%YdIs+rmwJMc=+8 zAJcDyQl?>wAU^(MFK&Q+bC}UnhU=F@soAM8(~1V$w9(bMl5gUz>Fm!p=MJNeJ(ZPb zSWj6cG~cT=U-QN{H6gnpZRrUqb}OCjDZdLq5f+}`R(V}kr*$vOP;DFKy$oe(D_wpW zJZA%$ugG-$egr-Ciaa1^s7x_9=k5<*G3P14lK8->So>AkG&Ila2_`1EGzoe}K4ra% z@3V4ezA8V_%J$S4oxJ)w)WEFVA+O8!8YITUy<|m}T)cbBd`;^|``&<<^96N!Q@)k3 zKg2ytV@C*5Z0pE6VlOa+sW0j1o3au(t?yfM2)FRzCdm0T8H14srq6E)fnoiO?{l+jVPd7LPZ+)fqca0Xb6rnF4~XHw3vK8lKYBppIjz+OH+=2guLGy~Ut0PTD^kA!+K zenuJg4kdmnzt&HNP`@>PwSBd|#;Fi0Z0=nvZYeH=O4aKl)*C-*oR)#pqjIVdWR0)> zndF0j>E{xXY1(J9skW5zKbQ3>=5raMpFmEEFa0Y}`_E-RyP&1(-E2Ir%arcN3Y3;5 zmuO$-p2(8#YK~kps1`DNpZHXZq^$P7Mh&ML4oDg&tO$xxTh=%OoaW@TUN#~dJ8+|j ztLQAv`$DExwf80})lNpvHAGc%9zaaKK2vNa-blyRn@`p`^K-tG#oA^1>q}W4=~&{w zk})a2hoIajx#j`HkC}y+aSn+X?U5X#u3yOuF>4+TOQX7n~)EQ?Vua|A@c2@v;iQKTdC^+S~MdINg^Db%5?`9z*fVZ(n)!oE(7ihurR9)l( 
zbJe}8k_Hp_g;~2Fe)*E^&#L4U_5Z|6e}6vH^4xf^eyf1Z8{snQ9G} zodYkg03G#4UWi{Pd$_FMfTzi_5I}|`);uvE{A}iHE`hz?EaJGJxr7s>YtF-~U4YZF zWqh?0m!PcevsVl25C;Ipy#EO1ZB^E!S$wcJyFmY=UoL^pQ*&=Kfn$Q1Xp>SnqRx|ya0 zPgUK__HlQ!?XQ0B&$^9}2^rtraX;Irh0j`sOm=^_&GX%wt=zq_pXT5z|7lAUQ1(+k zKOLTqHoQ@GgP+E7ws?Mu8z~d2Z_3B8@T6KF<;$&)(}E|cQH@cT6*HEfp93uC)?dRn z{)bPxS{TMH-2K5Wv&W}eAT4;TYGIqFh0#Y*%NH#i;uhv8U>N&zzQw~7UJx#-$WLEv zLF3$2J3Lh%eg%tzGJALq9^ObTc$7Un+;3)Y!dY{%qqv1J-^w1?Jh#4u(Js%RgcPe5 z9$yCs4msD;61d|FMBxME!DPtRFToaPhxXOaAA9fMn(8V2}Px zMLDvwg9#DNF|{U{;9x?7zu)A)nZDrf5&ZWb05SevO1K#=7noukpu_z4UyHK%Z*7cx z+NMBty?v2V1=OQz&Q<9{8^%ca035RuO5k{P3>d~vrZQFN$+0pD!Qiisl@DLLeO2{!4 zCJKt2o#3=6WTK2uaJ_O)Nnon#uz9LYV*LQfRukc~%)^}zvJUGAioli({rnvh8lNHG1&lfk7fPLjd;3H1=G&&wfeGR$?y zH_&5~WwnP@sztje!)kS$4o{X(yDC=lXF~67JgiN}RVLy(U6>44C3glS+dD;m9%{=9 zioGy`JU3Ogz=W<`k`;V0Y%rY9Yg<+5`|qXk0OMz5=Yf7FldZDc1gX(SF}yczp-$h) zc>VNx>chX^`cucPQhGbDtf%ksBJNux7<9YZ6wfPWW>}1d`?;9+U*E|VVW(A!9_Mc} z6?xl3ZKlbDy5~GJT4BX$f5wW;;C>1z&f%^Uo1Barj7$f*leY;Vvxg>414}+c$EL}+ zlre!iRtJav6P$^UtA)b)mh)Azz>+-XWd>Bs=|Jl|+SAZVqn^`c!~>fKP_TOf@fki{ z(VS|pShcBeIvfLg>5W&c7&_X>LLSE%(s+PjXS*0Sxc>tT8!Vh*+j)kp^Z)b>#I|KK zpj-V+$7Wz1^C)>H=wCi{pNZYB*s(ocgnt@m9GE>gGje@q&y45c}!n4 zlJb9qgDsD$Es@VhV(Futx4rmqwcWyqZ)wsJ`Kj*`EQ^y8m&!PO??`I0RMv{i-UQi0 zXQ|uOD-H#Dx*f9AMn_i>8x(m!=yv^5S)dne%N@T=ZbhD^71VP%tUsm+KfuzEXv+p>X?hHaLynHTIlKxiJM7C7X3Ze7kN zHaXC#q}z*s!b9p4%hpY|Y@n2)mfQ-r_AbN*s4*~vxJ3u{HrXv)m=DHu99n><1)HS% zLa~Z*`bIlpEn{J$sagN+vK{G zX`3L81eZ}M;e@g6dI|+#KBMPfvn+k_CThA}PKB?oa62rtf2efO3UW~L6>Yb1cdkSt zV}8#jI_Se;s4cmhVu5LSIK*dqqvtOB>*%z+4jEYM9fcZ@evZ5W6xULV? 
z7$PHS^$uCB&foP7$Y}Bc27CEv@b_G_$qEnq-*$oLlpFY_Q?s{(nO~P*}F34>zhfL=F z^89Yux%41e9ERAKL7(gaw^Fcn4YCTo{j2mX4}NR6@F1EMqt&w;K;{H>sBg&(wjVvL z&Mv;tO_9P<;v!B$P^UZy3HkJ9o=l1=YjfhJ?Eyc9EqpUic6Y%o0Dasaf%XKwuuI05 z5nb7_Zo>~6-Nt;IVz|0gD9506fg!j2UI<#=Q`US*?;Mfu(%PTp4M+bkvAWf5>QQAM zUTiP*V)TC5vNYU0%-^-RnQ4|u&3~0X_DB`*j1FZtCbMbx#w=})VS|xJc+O&QnPpI% z?>=LR0Lce`mC^bbrLdZG?^hZ8z)Euu?v<`{K)!8{*xqYCwh6FiC2T8R(&&dTmuG-9 zWQVlkrmn{>Dmxse^H?Dar&$4p^V52TlezB{0Hltq3qY^x*gy<91R$!;W(X68Yr5DXro#F*FFv*{{`XbW;R~*_ zgo^{P{LUNz8f_GT z*l9Hv$mBIsJew0~EbS-_o9)|N)ip&(>4IK-)ru&$91WJihjno(`#wuT=#259!*6iG z9j5nxlQ%H&H_kxxSW6wQ!1I<*bI(9Y$)hP(WNrL}oRyE^W!zQ85-#_Fmfq*8vec|T zi;$j!H2*4QGw~v{gP?QLV708-MP;dZ`y$)`hw0OEa!4Je5n;{BvDgPaiH8Ds!dD7w zt+CFbaKr^?9zIu67VWy;j4o_XqN%?tDy(bG0*31@!D?HLUfu zK<51Zfc@v4^Rk-5wkXl`^Dx{Tq$B4s!ow8&2f~lGQ}7iiv7YJg{Nw*V{ZMy5O)tU| zc92{km~>GNaqTY;0jYmUR>tKO$;{L38S_Nnxg_fY=xVQy5ndk7$xSr< zFIyRM5o`BQAb9-#m#p`YDXiEGK`DTzAgeDFy^Ioh$NlY4Hu3qCVk5k#{fx`>*)4=_ zt*14&5U#s{e!GQzxP?M*W2^0?rnk$m6Q-v;LNLG~x0R7){%vq*$nD#2gYNC5^S2Rt zmot*$?vxP{d0F|p=_7GiNX9?zp^PE!5_|lmM(3yaU=fzy+gWx(?oWnpprv;J@XNIS z4ph$#6n_^qZzJ`*i>IA5_b!ZE1@y;V+vwEc9*j;6?tugzr1tlez3H`katP4(`aPL! 
zn_XCNVVLAm%6(<%d?VP34=GlJ4HE!ZchZRaWgwzNYWNdvCnqDx{rj*f6pgN9rD+F8 zzi2h;Sco(9#$}MsbZt}uC_2mvY*@%nsH^95mYQsQevVxUrDa$g)I(9Rq5=8X5GCvB zO*M2YGDou_O37wuQMEH^GgZ{A#8Tp}y9B6{nGD+cmS&X}>fFUUHqtQ7s_L)@OKUY; zLO5U%ozScl9UH(;w|H)qbSu4tt*JZ7AX-jgyQ~rtlvy7S+vo+S)soigR)D^xn0Dw^ zJ$+{}d3#$G0Di=u#>OcJz)+CUbZr1bg$aa`BhdzR_qM)qX@ipPdt0lJJ!6%Rm8?%1 zPRD($qzX6g!~1trZJ}#0CbSG`7VKn_j06HMH48N;#n*CzEJOWB4=iQuo9t_eQlyEO z5G!wg2Se>1UyGeX%Cc=btOqUIEals)4ZXx+1pqA1IIPNXD_tf-ZjMpL=JGDz#w3T8 z$OZg>??JSPO%y8XKkL zR0T0~Kj%i1Uhtw*Bmi@UfP29n}$Fv5f&3CiVgOC-21>*8`2Qu&t|o~jMG zsC&4bx;;o~(cCsDQgs0v4y~tF0)1Sf*M#*% z(n$M`A!yX4K%oFq0A#-~p7sl?2{v7%#JomQBMF*vje1BcgEyFK{J5b8lB(UwOlkef z#N>0!N7N0^^KmS8QdvjEVQ>*)l3F#;_bv}QZ4iN@0A(Sc~Isa4{MP;w#_wg0jt_4$|O^K^+A$2a%5nbZ_u&>?2ED)i4qjIkFHHSVH?m^{bk zd5YAx`RuRE2f04Bg_4r2?kn<=x3GTc;NtUA&_26>bFRX|g?ByeptS9ur z>v49?GTpRU^-5Z@6|Ij3ZBACW*f`lL9el-{+U|)NIXk$fqCT!mT$yaWjHl#sRRM)qC2Y#oC6dN?zJGvV^N4=Wv5P08`ScRP z`9HMFyOY1fiauQ)ll_@KDsR05+3kFJt2#DbbPCA33g@UwpQTtq*pdTN6jz>^VzFL( zF$HJ=7heS{DNu257{?(uG_PRAu-MYAf)%gN&7n^!;OJjJ2#o0(qm^Qr%$!u^A1hdG z?KW_8t`_TAdlgFvH67rE z$j?$GbxH+6`i_nC3;8vtL^Q6JvMx{ye8wW;c95$$0Rn`B(s@B6|3Tn~Y zX%>4pVdRWj*@kA)rHFlQ4*r~hr#Y>09jR$orHn)_cH2D4@mY@qYftvLU! z`{6AJz|&qlr5(^((vYe z0_zfsN{4G%uVWClUSJ!qS?PnsUptO%TrLC&bRmd67&ag8scl(~lf2S?6rT=W<6Xy! 
z(;>(t*MWw^zb&jtKNz&YA{SHNI#vsJaMP+fR%6!vZ`HAyRLt`G923Bj8$Xw(weGl1 zg~EmF>AAWfhWRw4t~JXZZ7q7Wo)z{0sIbwMuziSz*0btq7HzI)l`}8G(U*)9;A#(f zRGT~X+^S8QgH?y(&D6I3-*Ayj5S^1%ACwuFn$)){RawTN-AZ!f!d*Lnn`O+|+!2?o z*+^F&-k=Q}ndxto4aQaN{oRAZ+ONIN1=u_#+?^zXDT@9?S zZOb0GI-tjE`$9uNFOR69RTn>q=ot^#_iAKSNw<3ASibKQYWkpOfTk~GF+W4WSoz&; z9D+=RvF`MQ^|Wo2y=#oJPr*)=ztjHHEoflbN(mZXdUG7!a`nMR@`?0PW2>^h;X4}E z*s5EK>AH4@^T(}J&=``$)-6=2352g})T)V8g5q&Yj@9b7&;9tN$wO`+rcagrJ|gAwEKFVwY}#nDca_$QxEG*b-kuVz*XSWQB6 zaQ8#hxw%y-+BPcYV1x7IKZ<||>XL=+Xk8H(iAtIz$9aqS~wJ$wr$tK4el0=$iYCJLb&Wc- zw5mc^3vF#hhl7Ok=4Sas*?_LmjFwhLB%tex2~pqzJ6xZ8uBG*t?!ROdP|6pf2J2f{ z(ZSd#HhVA()nF~9iULn439VC))=-!C(a6@;8l36x)W&M(i}OB6pV6T$N>{dlz2+iq zZex92^17V=aaPAb&$tIk*=<#&ac!ZLO&mIB zFOT4Mt3u)Jm5HWed(h1SdaS)-k8ig}ziZmV^swW+45CTxl?cvn!HCwjhrD`kEgf!e zAs%HN86B+G0ML&+C`?_|!D^>Z+)9BRZ4&KCJYt)c=8U0c9WmSa)Tg7>%{8CB9bpQ& zK<7GI@9WdX&Msy5@x|jH9XdrEJOv7ueHQjC8ng!`bl` zFlVy2tu*?j>I>1e+C6q>moS1SD6_LwAsXTXK7<6S<;)326 zg8uF`Xk0@pmCP<6S0^`7wJwlW4$>Q4%0zeb&SDK@5$<{Jq9pQvsIX!)o$F#HK2u_V zb$_~JBjkWmq|PP_3^~TpbZr?Xrq1=GYs)2@RUTImVp7QbgHv~u0~4Vap0d)yKtp=Z zYk^8%8p^{l?kTIHKN?vg4LCElJ_T9(B3*yVs#()RhSVj&Og9+rFr1Z&5_M~(^S5G% zJ|$|gRWb{Am0j%42;?fIeIM>pn0{CO3lmZFd17KT@=(O3U*XPussbbV1FuZK)^_mAZ0=iN{P}!k&V<)lz^ZS zEP#l?g0Vrs7R%HBch242Y>@Zf@AH?BxqHt&bZa#dH~idCI^8?Yy}(kOzP$~^vxHnj^{kX_Y=$JY{Oy!Fc&3N3Vwa&n$73`EpZiFG49l|? 
zAusFXS%KU}>YxsT4hChJdY6NG%V+qm5aUCg)qT@xItq^=oz%(c0Jo~9d|_?%s3?Z#!5qRC@L|}=Wf^goTu`@PA}1t7vxDA zI84uJ@;Rt}zU}=?>#Ccz3UJy$(D!z3=fJ4|nyeSlf)q^L$?%tD@IbQcnh zZ#YEGPxer&JM^aZV}CwRBZljGHg01!*4pqDDalHIhAb2B&~vl4bG6ZYE6l+nwN~DN zTKDgvkMGd)vR84=#Q)lXt$V2FaNTcz=YMzLLu8pVTv8dH0~VKukAVF3*Kpl!XBZ@o z&?obc;JgvoT-4VK>CBz_tLn$QXx?2CDx2@p+eLwxVbK`Gj=S}TonP?{GnE3(^>i-izgQlI1l&wfLvr zM}-dIyL+V$Vcb9UiE3~Ux$e`mvKl5G9p@2N2!IBao4fWn^0)TTllSR& zSWCM&w6VNPU)~2r(=p1qU%wd}+vE2`efc)McEA1%mP?BQy> z80fJA%+HTByFhQxOnQig;rZbV=XM2;7C_6?Z6BMf!h$$4ygeXfLXPQW2ME%Zhk01J zJ5=Dt0AG)mn5(@V9QuIXR83#wZ16gU8>CnGIHE^z)jJ^xuAeTL?4^>yK zCHo@~@xPUG#-`f7gl9kFNQYwO4LYB<#S z<8erJ>!{Nc`Y=><$4d~Y)UnutYvI4bu03aQR@(d($Dhy@I7?9`>R$T<*9@9571KMB zy#Io*Fp2vAOK3^WnQM2_vo=8h++PO5``Qe5f6I|LkIyoDxXP{7zb4+w;h}||GM}U*4yb${CB{cG1 zknqYVcbcqMnIdt{H|2u$Rz=ra)wte+(e++nB!|u)Npy>c2raIm6#@O43k>Jy=w^Rz z+-&{-X!f@YHJdq}-J5x#kjk^S=Re&vG@>10MT@X7R)57C&s<;z$3Z#UuZ%#UuZz#jm1U{Hk$_wg02Vum4+%U;n2TzmIP5 z`^GJv{vR!#`EM~5+O7* zUp51xux923$h^H<}2aj^f-g)=8RY#za26yYh|@3`?~7e0T* z(d;Ean(-7|0uADLI$VS(D0yT7{PvIB#W|1#aiA05}_`nvYa5di(s+u)SvJs~a($?`*2Op+&H>L= z;TElF*+zZWVbj4$8b40c)OC8R{|4difoR}DrN)De84z9&2=)?hmddsK-$8D-*^(vD z0QE?b2*|aD$*?MmeDTo4^-y+vKy%mYxn72O1m1R4O|o%8%vqu}XLMw}-n3~5jEelk z>((m5Gw)Uu8+U_pHt1Q_d@xP{Ui>%Dj=tr=Tf>KR!v>uBucG-IbkXcQ;A8zW@Nozu zX@MUO!?X=_WP=5KCf%VN68Kj27J_f18#YSdKebW+Cv zJ89?3dRp+2*I`qp*5l}ZlRhI8EkXutJh9=`eRZ&bxH5FBK<^^NRVH9`VZ7N)(}Q}; z_yz(yETz}p(A%jK-NBFF09(TGt+(i-5Z`CP78p>?-9V3S)h||G z-asq2>UUyyN!q6O$dLU3rL$ZmMtU=B8_p)?!1Elm!YRN_|`Bk844Fle7@^g$M}>4Y!Tj#*m>G zd(VR3ulu^FPvQ&QHJabn(v3Uy&gyTqG-W5Oh8BJu+`bc2j2bW6tuGS4y#Q(0Xj~&Z zj@w9|?1nae&Sd&^H;ONt9Bj7-<3zES-qgpcB~_ICmY(7L&kb*^YQ$ocmBSzYo&|TY1Ha%-JN)#2IGW@ z8y@gX*Y9opCxa5DOpNA}t>&SmaCqSm@h`ge0JyS+H1+_vvA5{t0sSG!vx5(UWhkWw z4}wiu5?p>zpRK6x)KIT?0r=l*f{(llXu`RpJuLI0_P?eMOtWNP);;6|D1!N z71Of!fT0V6>4)@G=_t3=2l^8#8hH65)Uc3_exzTdl+HPVy>SV3I--wJx2~h*NA&hd zTlsJdVeR=W0^ZraI0D7MV!HIGJ}h-{5{_a41%{%;x0NSp<58HcZl)7QVZE@2Zat>= 
z5m0eCrR%vast&@&?!qy>G1{xN{unG1$I-E4dbd~@A3578gQ?~i%wzID)N`7qSRF{V@cRX7v8Y`p10ddfjpT3|^OfqL0QS z3iGK{sD14?zy=*~vJb(S z$gK>3C^ugQX}16yr%mRa zm|0+_7nT`_Gv`g{3KfhKIszQ0IN6b7R3>D@7Q!xLZKmxKMZ-hsVN1XR9pqjz!~qn$ zn_XI`75F&VtpW#Z(zBWqTsAH}IU@Pcn!{R_=oiKe-y{+O{@AO~BbZv)`&PhGf z$E%6&DMNQ^rak0~VJd>&V-QRL@QHe!)CU?EU;C~qCX*2gn-&j;=|JC+^vX&786WUC zMp6)6fLwATHTkY!)uBBG;z0zRId#**Bme_D%+$BaZst7}d} zK3hwZPU{P-0{(cnq$#EY8n;1bpk!P}Po06t@FQ(K1A9coDVUQz0v_Cp>-1Zcx%5q) zeyNXDrl=8KF|~zBKn7Pk1vd~9ZTu4+0AUGw%~^e-M^X`pYP@{e+5Bf`aaO)CcZD0t2; zaxhQ9b_%zOel2kcobm>@y2LmIxqHWpWy)FlB3=|@_kPTcvS;ZHw-|*h zTX_kBgMi(cAX+O6=*0xlHvKF-2_l57H`$wxs3Y~a+iA~c%tuvfn>2M7G$-H5SQcetU&)aZ!$!Cjxy3AQ+%v!4kl)acKq5aTYP{= zYL1wM$GjZT3XeB)M4j>{ZORq5LtT;P6%QzX(KxSIhqq2XG1n~}#k%1hx>%upe$f}1 zpYw~}j+t@3M9Oa}y3!whaS;kNZ7QZiv9O`3*qqvw-Tg4xl9LCHL;IBqn~Ba@YvBn% znwsF7SvSq1+LFT5PNV8(q9^btvAGzG$FSyNs`3Zb-IU$h4Va=jxJuSwBYct26F;5~ z7Q|;(;GxY84!B5UDzR_ML(f74eoWS>u!Z2bE6ZAln}L1*ZXvF4y?`~ug=Jup_6n`RTay{FK3?irp1xQ!)@L_opLhB$NFmS%nXxnd%}rMk^De49H>zNNTL z4R7CXlI4=EI{&oZH0!O#dh@u=B;=eCI5wWENphReoS91x$hSnR(%Xm^r+Kief!|J{1$69|N5)478sfH08h>vAYWtL zW)@!0{3r`LtWoT-3c_NLz9m<6fvmh{zWPG3lu<%_Op=$U-c3+l8bKc%M8JNY6TsP$dGctjnaLL=IW z@8h{ncpGk~d_jxa3LpKeop=yN&B5As;;iCXbtno|mmZ?p4x&S*^Z~-M0t}QwL|c_O zj}Q)>JBp53^-SF`vIM`%TNCy?pMn&eI%`DZJ7U@VO0RYllhsX2>Ece}gSfAg6gTe@ zHI&*}bo6`Kb9+lQ3wi`RbYC(*(tL=x?Q?@$Ku}WpXJ>&OgeG^!h+e0%&Z7Gzo21X@ z2mJB zTxw&#RB_?tEt=XDn6sA-bQSCm`O0pf!?WqJZXj8oQ;Y7Rc~j<0!!DxzcByEP-qT&? 
zc13m{x9diA7g-tm*fBnMn)mJKvfO4cGs|g7cahQT50_uDbQC;b6V5HsJUg;?YM+3a zkPfoxATQu#E17w%0)k+6Ds(8}rS{@7GspjHFW#5*)>02b9YnmRyafoXVkfVhkYd}e@X>w_kp z4S+CiwhV)j8BSPbbPPP;yct^b-j|Df^~Y(!2QLRrNnS65E?S6f#|F1P==uk^o>m{J zsw<#8YcZoPW=25}eC)!5Wlkv`upebCR4N$V#Fra82JI&F>t3`%QXPnC1#N6Le_VfW zOnjg9ucML#o7?6#14vS|=k5N46<#OS zmeX{MLjdT%D+h9hSLIbH%W+kl<^o4u{JSR|tjofyfQ_%x*{j4zpu^7Vz;}(K->wrq zS@;C~esa$BB8|QuC~`rt6R)0g>#mRe4sVYSipaBb{7NL)ltY@0*)2MGp zk0Ztg9<(?8>qgPlA0=l)ffAM5mJrMD#p~$MjiQ$d1x)fypwr{1*G*uGkI{&mL<<#P zC*LGq#%un~BClgqWzY>as*E>_Xdz#Ub5qw*(aj<|_8`0sLAyb{hhVXwr3pjC__(d< z?j7-JOu$Y{ZfKIG0lmQ1!Sy7D1{<+AgPm>>4=d`|XJL*eE>>rp3!b=DJgMXa!HF=% zEQLS$3eZlD-(j}bo??X5g7oN6QHjv87vGMxT0!^UF1~ZF0Ar&~2;MeKG*Q4)JaUI1 zWm?cZ9FhYxCX(GKA0b?D`2NNS(UM(1d_Dps;uT3a<33dtI|ILt+TSU>?!q{LKPKOH z5qv`ozf)vnut?6VFT~+k)A2Bn9nq?tbh08PBhWLn*TnGd)7k2G%Pwkr>zB?kVi4h= zhwU0X)CARHc2SKMrInj+e2tct+1$+AmD8uz5KsaFC>Ne1U(K*ekOoM7}4nBDLu zceOu>;)S4`ybJU8T_2E{{&Z?f<-iaz$Ai2!uZI~t2$EnDFIEecXMGw2VHS$UnR{#@ zEe+}fe7ea{I{2BV_UtO7>sXjk3O<8TV}ay1@@>~i+<2Mal5kAa!?qkhWq)s!=&&r5 z?*{p{H1u}8e0v3Nrh?wYte)`HiboNeh7&lN4zrm>kl`%;W|ab&KlCXEUs@KpSqldE zBuWsZAf^fo-I2xR12Ks++m#~{O(;H#z{2N>g$4WA5Pys)y zQNgrO$g&#QGu{Hd9HWkV2yVR@w(^iJuBBF<9{#Opt0TV=S}@q^e({;q^Cvwhf@+$V zIVkLHUDN~+8CKRJdiBv!qIdQjSZXxGC-lgR4=1saw3Jy8nq@w99u2+9KAJijYP0#Y zVzlT?AB+*VK`GebVR0{fjg&kr+IWhKtpK#ldYMJVbogO00Q;u#2>9(c=+Z|-e-wV< z5z$dy>ZI!G6rDbJM05`@>9kfdwmxh*E@p2}b5tT%Xcxg$yW}nc_Dp*ZFZs3ySipq( z@IF>U!(?^_s3y}-=--hFP_cH@&-(BXGPz`EGS*{s(SGMT=5!F61o?ohz4g1XJRYfxqNu%6<&O(FA(>F>#5r3Zf|} zQ*~l$IDuj{CN%O1v@n^bKOu!P2w-V+;t5Dz zyMqrsDNZPA%|5#3DbYnew~r=0g$bInpEf@ws+G5bQ=S$p6m{Bu>O2;l)8hR!axA{D zfe(?f;w5!L0(E*;baMW?1pe{o(F4zl9D5-&^ffhUO{x4@ajSDL$3cOqF8h^!7>~+>zf!B`MBU}z=nhA~@vJ&leFpqMP3%a2Y%?3k z5;Lkeu*Nxz&F>mx23YH8QZDu$-7NlXuLDTkkN!XM8?9eyl4 z8zdLG_r#kqBa>8{f`#t_YT%1SkQ_ctCZE^fPL7&XdIZENg$@W+ zC^l4~Le>Bw!z|TFLJG+y0QqC2^v?kLWS0nsuxmIZ;tt_Lbp^jz=ba!WKwexX!jmZg z{#sighh&RoqJqCnVpu8hOOiLXol+eYC(gzOJ_-hbt~2FC*HzOID8PWw%|lR0oyS!} zU&AbW5TdY(Exy*zxX~M*#t+=QJP#e{O?Tfn!ZVo@o_$Ond+`(g| 
znOKar%Ly29s?dCjw;t3ApNQqq8#Y`Gdxy)M zDd#-p$m0rVg|TjkpkV-1@D~o!%%#2V>v+9Ht1)CBWqoqY+Bh&3m3ENxT?XU5jb`oc!UQf^t1MbW|{M7(E~K|9d!4=t3*7TECVyI8u$jLRl(S#kn) zBWtfM=U#AFZY(&Q(lksVr&om2ah?)c&+>2pTOHdm%7r6gAKJOORHZC6!R7NrYOCV( zs&G0iDgxo^JX^imR#}aNu2oGnXRnsoC%SO@6UM6}oPH#np3=~%T2 zxgkjb3uF;k*@r8t4e&Q5`iK)^Xy)$wU*JXaYH!pN(!}R z?JJUxDCGXKZVI><%kvl2ToJzI*@sxo4d$`2z8+*HoH3AqA>e;42$Sf`Idfmwf#Y!n z?vN$nxJjm4d}Rd9Y^-hJ4)wELfBurjmxe0O$5wod0v|_f4kT(D)A}>~c-v>hE#~1A zbN?irQ9)89MX`2qNpMPFIT7N#-0aT<^b7fU9jh7?)>wgMxVFfRv+5zVCNQSbSg2IE z`U+&{i<_zzu<}=4FnI(HCR?jm$s$wc#gzIQxwjWZ#x!?Wf}n|)Q{SBHV9)_;*@#)cpU1# zY13L`vjgJCvZg!T#_S3tE;~ZHn?L^n2bfARwVfco_}lreZ=%@X3__3PQ|X~K;w~IC z_nssSwIYtz#^r3ICeMqRil0_KFP5r>r-RoPiQO)EQ%s*Bd@ah|{zRzHaTJ1U4&ZMS zwNptxcwMmmNlK5qh<>_#hL{U$ieF~neDx3Vza;p6-7PPP7N}_4OQJLC-}I7bm4wpF z8nX#f3~U}xza-kH)uU@{M}d_Jik1sFluo3MGexiD-}npKmR~T#e^KE~aUD#g56={j zUHEGzJvs|#Na3jIDWc`98$SM*GJ10lr$3f?u{FmzuySF}r=B$bv5>n>VK z)YZn*r*m<USkX@{k;zq?JJ_ zgMiEmRUc1IOJM~~rDA?yGY{L)9j(YC11=XV5b${(d~yNK58^3KiD7+yu}Xg|6uv+q zhf}WtadKmvAyu%OX^^nT-Fr3 z&C;lp<1%HVQqa<7>teHcVHgQ;mq0^Xi2-}vEp6+RF1DlcH%E;3t5E%+BXXaRP{mxy zIxI9yD9{$l{(PY{&_Azb!|cW#76?{M((rQ#pJ>|5bZg*9(4g2$P`dLz#p;nRYAsro zdglb~n>fgXumF(hb(~&@MmZfb9(V+2wKzS0qEvM3-cET4k}d#pk>5FhpUuEc;_-mr zK0wNmHZ#*|5;|#CIkQd_4=2@?iY5UV2^HkqZ2hs9*$43YvVq%%w@MqQGEM+IYG-i$ z+=oJtN(`0}olm-WZnRmje}{MDDD5rIM^K7&+@T_J4q1ZHCb4Z9o4i;kO71%+{rM|Dg7;a(k;49*4Md?YI%V700mwGP~Q(!i@f0>}1DN?PL?ry?E z1WsX-`+Wi&*qy^4C~O1Ep{aeF<}DYk^CrNMfSZkBHFE;<0jve)Qm}iQk9`woH(>i< z!gzYQcr)$$1bl<;h|lmq1NcTskoK++w}4pt%ET>Arg(T#csyR`>VDoy6S#G43e6}( zZ@;5Y%b-4mVaKZ?0DH_AU&R6GYjn|TIQX4R`(A^FathVH2II)@sLx8|Ttg4>qnsA5 z#8$YLe5+x`@%c&-3};wL`&S{uYAeGk&M-N+euF5nLz{e65C$XP(Zrx=+W9)35gQ01 zpTF>US$_u&6WeCT;`l9g^;0Y0f!Qo9eCFX8^N(@QxDns`&7io5P6vhV7;L&9B~OL; zA#1!Vgcx9=SZj!f%a1*c`Nm+*8!!u1XMaM|H$!E)?-O!Wii?B4Z^i+z`ss0+wiT%} zWoj4tZL9DoA5#1_@j&)*I5lV11p62)6T##>DUFnvbH+`IncGB*)HlJq0R5Gy55a@m zpt!~DgR|SktLodI(1IPp$O&JmfT+i6hGI-;DT{rjD!jFRLhtW@hPLno_pDz6)|Ayo 
zQi7AVREpce@4<>n^v?agqNvga%{G!oJ(t});J2CC7F|sR_IuTQ8;lzno-6_TY!+zv zFMq<;KQbxSuEr<|mN=Wes28g)oO?o2;w?`ZtWZC5Xu3A117!L^0aE{;2VW_f)Ggzk`lXesZtQKoxa^8+BV$_ zku20o4O&4Pyb;7`Z^ddH>>Z}k?@`Y;VVJUx9(xm<2Kc}?v2=D&;#)XVSw}a#g+qqd zY5iMbcKRl+3nagYwcDtv2r9QtG<2^JIYDNcLmycYj;eW4TX}*sb1#sqf`ZoLMgXc3T`yFM#?n$ zx|M#d!V$+d%Gd`Zk!{p(pC}WrS=S8#%Me`pqOQIpwFE&6AlmT7nHFIpIMYP8|jY$HiG0rX!WN-bBoPE;t@v|)`*)N=`^+) zCMMrf`&S*E6c=r-hG}Umoven$QZk+XT`gL5{0{ON`&*3RX$NvZa~#8mx8{HfB&5_)ux0WV82$ zmz@_6cwbx{Tiguk#m#6Q5(h76+(Cehzm_z^1-U|Otc_bR>p+`gSXrw5tz1!4d?{*5 z@gE4E5sFh00e5nE3?8aq?EwC)qb6${H>e9e^xzstr>wB+IL;<-Dsmq{8XN#1ZAA_h z&$DYB`B89*)Je~N2u7iXmV79Du0`y`CQ-YWo?YuO959FQ(1Nv&+?LOW(*1m9ZRxWT zp_*#k9I{ipk6_gJ1`YlQL|_*^^pVI_S1q9FABpRnigua@$JVZ@zoe-tF8hK3%6HE}}$PZ;X75M9Ritw%Ao8v5iY zt_|#^iuI0mkiu)%J91sK89Xk4P1_I#TiXqe9N79@y}^;m!zjQjKPPW+WMOBnJ0_lt z!wRqE!>lwK^RZ}aLHo9ij+Sh={IQVP4{mcbp)DK1n$(bXT=Y*ss4xsI&Z1%?j)P&F zMb8{>)Neinw=jO)SE30`eI0jzYpCY9@Ld$1-Y`tq2NI)s2`aQuMoxLn>wx|5v+0UY zL^C@BXw)ZiR*OFoPbiDXbwXsv|BjBp1_S&wwL1ZlxQ~Y5(LN2clm>Ne1V^~@)M0Z&wQlS`Lib^(6vt`&^vr8^gtR))yMPLe{}lN zxg~EpE6hV}g{W2`S6=9r-r^fA!6as9L#v$6_ln%y)n{I@MG-dNQ@W zvqm&m-yKgc)PQAQOc^IdQ*r*HkE{v0?j-ItV9h^%QVii6KqrO3Hoz2L@)5Fmy{o-2 z5k288JxoeI1J!yjmcHBU@CPJIyA;eTx>TIV<1E5oC&Kb^_rVS(`V&CCxQY-TgP&%Z z%vmaPGUmbfCK0YWFh-dap*vWg)&evkF4b@)a7ww6pRf=q$CIXQarEpqOHu%aN>LIT zk3{8z`D}5LMOqT&s*_(r*+9g|{9Lq+A{UXB>7h%vI()pY?%nF}=AVz=(9(ij$t#Cr zz{}e)Qd2_*0X%O4ZQSbUa~hfkv9?T+?X;ejtj zUWf1GU?eAOtyL$hv4MI+fWdeS_NAlOHNZ+}Lr}r@v|zjAIyl`qv)$1~#~DKte1F?L)Sd24(~78Qhod(P z()aGbb3hHkGm~9d1tp6@^k01g>kPqq*3}Y(f+l5hhf#353eevpBEP zaar1Pu1l07+{fx<#?vFo1=<|SuN5uRC%C$)wfv<i1i4~^XEXoX8^@70PHxpPC{mxnP-gBe1f!KB4yLhCXjY~am~$`++Wxi3(5g_-tU}VcE}k3k73sgqDc_bfmlV(Kn)dXPin_$9jQTakaWg+BkgswP`4*V|Y=sID};wg7K;0vnWb4bhc<#g*gW zO8>v~Q{NV@ycm@)X;gXg1u7r;o#=QGj@j!v!5-`T?J zetz_dOl0aCOS$_TI`4Je_c?g4+g>MxBUXFaLu>arGF!jWs6+zWU*Qmi?eQ(TUB?_k zT^euwO?h%oD2iZ8rteScQ8#a&8iat75o{kGqc=d?Bi z%6r?9p9wV!_JykS(7pgQn^~D&0Ie$uFF4nTFOs@5DaL$qj@k7&KU{#Fi$^E9!`SQO 
zMu%u(4Ablbj@E`72B;Mrkd^s8Yp@jvGQ`~7)E`9F1vz}SSZp*Z&RzY3{>sHHZb@=a&?9;&JxQmIp~NM%Pd_Yi)G_}v}pg>gN~*H8upD? z=4?83qtIp%=D&u>6*+^CMLu@JlzHJPP4VjbJCUF7~O+TT{jN#(M#GW>P2 z$6}0|e}WA_@5*u2KHBTGM7piqzfqNj8TnlSQ;@EsI#yqN{BlPUMfTPBmcptBk zw@|6wOyzgKhYw8HWH3)|u~# z^R42i8%G?v*UeiI6OSr647suje69hkZ38)BpEQ7nb-T+Yg^LhSYPjz;07}a!anF+U zR2c_ZLU_zCqDf*cXRSq6sXTbF8XQ_3C)LR&&)EA_+HOMV$S}C;4E|JOPHNBQyu8JO>_cB7++8XMtDNqXEoZo%yp- z;FVb<8(x%&7VJH2CHzdhMETM1+Igrv61i%ahetv}G-yka*(}9S&xE)JO8!)&2Ff_O z49U?DkC3SFzMR;Ax8+=*90eHe{*V?adMUycS$HYJ)CfT6YOl!j#z(J8?l&*7%q{^A z>1i!ZPUNM@?;QOByNFqV0bzN+t?Y}w1JGo4K%n~&kBB` z%1SO(9jdC*s_LdHPOL&D(9g?UN4%&+m6iD7PWMD}l2Llk8onoRmGbX=|WMJ-m6 zjIKx{>_*z4=oKROQ>o^P7760frbXRrxgWJ~v?EtZgVey%ic}_4cxmAWjwTP*amG4i zjIN!peuU&N zyDSr8={R4VFHm5Hm26@1!E7J%ci~=_$;#^SH(IEdgA1XYA5|6lxpTj=$}p;xBK}n5 zxAs*q2enfkR%g@Fb62Z2fi$1iEaTYyPacoM1xXU!pSVdhyZ$jfwnhF$>x?A!xa=Y#pfc3 zCBJa#2FXvN2DsUBf1_zHM|Bc_4b%oX)517f!TqhkIK5y%9{5XSdMY`kn!$=^rJvs0 z;LsDRIFXG_u(15J zjWwqn)sD^ctba_mN^?xGZYRT^tbBPhARN)o`gA2@{MTr{B1T zDP4=10fv{20TP9bu-A=DlZ7&Hw+riLwJ*<+H~m^nak`~24{Zf?+&~?=6%7i;X|rC= zhc%aBP!uvKFslGg<9Hyn~N zy@-`O2gV-?;0i*RTc9oVG<=XnL1-LiK!Q1)_3A7oOS;4L@R|y(7R&h?Q(2g1hrufs zbqKm8xj5vETIHpT7p3Rn1>21CGQ_8nsM!v{)H;#t4FR%@%a);R^x_AfD_4JLYNOje za$egtT)yVKw%MdS)b^36wz+8$MOA9KY;Cx0$~xg__0MYN=&HezC1CUa4-w@QS_p|K&`;DeGYb)=C>u|R zC|_h@{jtL*U_CR|RuDK|XN`oEVq}+b-lNwFo5+J8h6wBh#EX(_uPaKtK=fe!TN}Z+#je5Ra%?P?6$-i zAcU-h1It8HD4$ywyi28Z{rpJ6Dq#9H((&xcsX7Mj+ z7!*d5dKPx;A^&nP?pPxuFgwDuH(*<#K&C=OcZot#uG+zUV3QeeDl^o6EcRpM!!3Ne zJ1qF(7Jxs_mw*Ag@P9nI;YMK;ED?-NT7jP}Ui>gV#JmQ~Vwnox70FWQLt4lMWRdW4 z5~v8fg!Dm`WtJl^7nViMGQ2^9z$G#;PXf3CGQ&m}m?C_OLYD@W4~#;i<;;K-G=(>T zF@fn7tA8+jxg+ChYc+#{RY07E#wsOfAXiH{6ODl55n*hYU;rDy1mUU}e5eUt&PQVmUE%2n8Zau9mHZrJ)U=QwTNN4~Z{U>%He4fw(Uu@v zjjEyr*@mW$a8pO{Ia>O6GJtR!%Oc?y>MJNIn`=gG1T|by&3P3eo6GCDJStl)XRE!y z$l#Jr@x}83;S~ln$&+{jkvI*yUQt0GpzH4=wOaT}R|HCr(|kon~jhsBPx9&;aa zsH){Hsg&>S!iI+lx(ZIc3|4_&*al; zSp8ZI+h+GN%yPJ9Y^QL?l`*oXuE#N@X6?kkCA-nYPd4*wOm-%I;558jf{_!jn@z4p 
zy{FK+)wkyh>te^?qzEuWPp)Lh$aMt8BpL`osrg9Lab+UN9L(0(zZHPZcmLl>0F`8u zK@2_%1S=Toq|59bk|UzE7bny=ybgJ6B30AB? zMpU$-RVVbbPh#sp=>uU;3Z{>BF+6E(3cC}N97=GTxO{^inxzU|muU1&{xX4kRD_nG z&v5W5)+>p|RmrFRmi!kdfAzQI+mei%lWQ-O+>FY;N$QmR_20g=_82Yo)i9=$z6F{0 zC*P5Q2w_`1LM}G0$g6;FpaZ&v-Oi4*er@q!~I2m?N55y&{){G7*Z^O|eE4cXLJS_5K zwD-y5@VpFv2Ah(Ni@TkFRD=%5dcYWk=wZVO3A-Yfs$6f3$DiIHf1w8`$KGS#p<7c7 zwr?7nVq6V_;I~tZk!<7>3esiCasUZknQC0bCr@KijYr^1^~+SFOIjt&Guc@j6K?6A z#(}`M?b3{vsAx!<(dya@RFu+CwLP?!Yj_>QR@0(?;gc z^wyAzM-m6uqfOJfgS+XfbfbrT+U|*@2_u~rxjdcGo5Of;@8jY;onmL>eykfky)@3l zJ$)d~(<-V;i`kS|@LHTl8e8qd6TT$Ait~8YS3DFK@9AZnwnFUQ8|jGzDLUez5%Hc( z^^@o6nI^!8w-ac7yvOTAU_A|O?!<5INKF%?or=Jg$vX1DuB(IFlN|+js}nq|+jT=0 z80Yp}NmaU$*y9-EAh7_faRUlGPsz;tf0Cn}wOSu-3)pRQMlRM*|Z)H>pSK>7+2SV4Cx%FocSV zg<)i=IEMTq!E+fvY9@NFgmYdt#7VcG z^$SunxsAqW89kFe=Yz#=xSj`UjnJ0|vWx-u0mI-th^bfTpoC?QNJ^N&C{j=oS{w^; z!=D>C(PI7%t5h=-zw+L26&Baxf`RB$*#_Ir&CND$gxTyF>kW<-yE8I`YP2SZ4+C$_ zF|v{S$sD8k-@d!>z1;f#PL8atS+3E(!Kx%wnFW2eO?@yIbGDvnO0LoBg0)rU%D#P- zYutps_4dlQ+r7ZYeY+^&H8Q$@>k3tik%m@li4-~20&_CkvoNa`$zsj>YKm`@ zW?vur@x@LW<})(97pf6%d>|8|Mljqo%O?wc!-e+mCYRqRRgYBBYkuQ;SnvMnH#WkQ zyYiCUp$KZAH8WmT4pU{%+z%9Ram|fpN+qpnZnTD>^0DTI55~5`FVAg4-rl)x%DKpR zR@q1kFEZ|e6}ZyEXy-e&9=ghH8=r$x<61cGY2VVA2ovy~eRGR(2jlWQqoXpFp2{;WLzi~s8JTQ8b3D%&mLTshs3uG? 
z;gorBzuXbHQdic>xB+%fs92}mOLMb)2deAYH1LE{U5)fC7%4I;D9whPq&Q8Wf&+| zuOiGrA$7!m4gYD(Pv+_Aq*ce}CJ<@BmD|_rT0oP~S^X z1`*IMF`8urWyJ?#P%*XxlU)xe+7^245+kRz_3e208)F;%+k=0BU*&KjkPnGF=!Z*; zp{N=??wIk5%<^M63%e8GEG0dSw#x6cx2Mt1%9+iUU8+<*D{u+lsTa2Y9dr{oR(vn% zWfb6H?`>Rx$H3mk19-gA+vwSP8_dqw5jTqRtvy9s%Vvful%vvNQ!0Pp3w{dxv3mO$ zxwv-JmmlRcoFA`KVIQMsidBp304~;WE%Z(wLpRDpm6b`yT(Cq{rJo*rpVQaSQHkhl z+=s4=?`z~MtEjxMF&Hzgm_`eDh|HgqBdDJVDDR;uli-wgiD{&(FC_BumjW1D=!;9SZv_!q-WZ6)TQ5W6 zIvRT!I1nM3x;iQklcgh zqtzf|4c?9p!bpCne-8rLte`=I4FibI%!~|*Ob+V$%t~YK+MK513*J{EI#vrsj^acs$ z^)~=dw$Md4VvTI2`}whjR^2E&e&|MHm({XehPsfx0PM^!D5j2Ed~0r!OXZiFjM7Zm zL1be$oGff1J~FgHR=)K&OGK-`*)TK0`AX%|k@vD}+8!D-#JCDwoIk|46bSX{5DAqI zw{T)`>@7yJLhXkcIkf6lqeGfCh0Q=fnqcDi0yyZ$*;|b}QNfwvMiz~}%}BuL$KD3E zK1grhW@L4>%G?ktBd?NykF-*UhEiF5g~+m@)Vn#Aln>KWsl!mC38wnWp+;_2kX2U@ z*4il$ffIE`9lq~{+cp5mgrP=HbYkaF;}YQIzlR$0VbDMKb`bDw6ucdTwu0(zH=3xg zyQuh9AX+jF8fLVlYlazaghB5x48-?|VMc!PHWx%8oFJ-Zp(HETH^pOR|9zNojRlT> z-Vx4>1N-wbr&yW4y~9Yc>h@C3aKl6Yz5{T)YPgXS?gc$P+;Cg5k4 zGB8Vn@fcujI-LPnDWtC_d+ri%2HuU0?{|9Q?uH&6zT3Dy(10-V0@+~M6$@{G`m!sGH+<#A z!kaKQaRZzx8;D>Nt~|?BxhOPioQy!S^3jY?4QRxb)3SRcx&W2Z=-fSq&%GpavTmop znCg3P2L9DEeoCak;m2y>#LRP3_XquSByf^ef3+wVv)7 zX$;B*lWv{g1)!&KjI%vjS~p@bE}ftkeWk9yH<&==9R`T*6sVSK~%IK+1 zT~CKcfzF?!q|wH8#*Squq?=3e&=#hkHaoQ=m_E9YR)%4J##`dK0y*Sdwy(PBB(+*LO z$3S#X(eTHxJD2Z;!$<6*hv=2ZAljdTr3JhcXR#J#99!~2XZ!&RSbWPTFQ;&OaDC~sA55zTuX8^%F;?{VPS5{iGq=oVXz zHNl;{@(GZ!b~Nb;<2{7F81f`o`nh!CNn=yX{Wf@XFj$|1%LX#UCGH<4z*J2e2mi_i zNF14Ovq5FD^K|X~xsWsf4Y#Wowp0HRsT_1)qBc+lI0& z`0iNj8@La4@>%SAJLsx_u>rK_uYfTf6wYfiqh-&Xsjx`Y3NT6D6syWM+RjvdO$FUR741W>X(*K(`kjb- z^0+GSLl7L$E#h8vt%9&wIOWz4&Wp{ z^KIF<|6Mr)0@*J5d4|yykG3y?PuNKVUqbb}=&6^GxSQ6$WIP-y-CmYvcE>8+eWolu zYNjl`Vx}zp=1f`o2Tt5gt!EjFfEfE{VQ4$)^ep2F#_w*kjqc*zw}H@tX{!~dgsB9y z()c_XpVe>QrZ;CBs}X_gkvV|GVLCVmJNFOt{TwJnf;4PIZd*vM(?;aBr1@l|0UqUK z+|zt5lhuBaM2o8y8Eu{4{Dz(`p}-=rkF`XLj1rC+ z*kQ4e;hMS8!d0g zPaNg)WlDS4Er>rJCQpTh#g4IlcfVe2A{5`R5((9~QtIw!AA+;6_=~bj`Q5}6{F2R{(=*lmg;4k 
z0E_N+%4H1>bU4tJWg)`EIWk}t;&J%tV(|wD_U;vC{K!NcJJFU`j0dq|@|GI6+nd5~ z*9%Lr;YvKupd(AcoLAGgOO0MYki2C^?>=99tV6*VEJ!{(g6erce(}Z$uanw0@^b>m zyTfOI+-6R~xd*Q}5NS-|@@3FZt){P*8EtaEm6E>x4)XWKP&+OM1(`?p zEjRkO#>L4A-c4(l8+}uEV{$ZW>t<#dM_4}u)fK>0XmN}(qfN&zQ@oCRTSkBGM!qc- z4?qmy+M62kmcl1*UTjuo(ObZDJ23spGUG~6kX>bxf{0g**$vBO|Emynf1swXfk4;N z)vp<5k_8AkQnPd=EqKjnHSlel^aB)QdSpm%X)?e{YE_#6y^3s7f=hxz4n8Y1crTBA zf`k-<=wpWDEZFZG#b3hXpzbtM=TjeB}^pmb>GnBRltYU zRJ95u!7h0ErZ!xo?X}I~*9Fb7^E* zxcb#V7zJdFa%&(Ezfo=^sH+o6Yo6AGPLxBY2vYVsY#^KIj&(+dX5fdNI80?G#mQkS zOj#Z{&I9p-t8f7KHm)|@hpOPF^E@5<4kJ~+2#yqE^pE74cLcQ)9ww>pRJ@*8$b+y zqnM3GQ(Chv=c0^LOfKN@y(P#6&wqP%FnE~m-e}yXewaeV<20R4Z8UO3A8+ys*rg;o z3^w0!@=+i_o~+HFoY$dKEvIWr6>X z4v5l?CS~Xem`ys(&uAD@0mR}8<7Ov5!cQZ8Utug)KB8G~0P#Me@7^#ThFiZoHXBc+ z9A)Q-{b3Y}%U4V&w4?OxW@C54F=hcP0d=qTF>T(0a>poTt1((RPS0(HqVhP`k{vv| z)p$bzSF>`vVff_W5j?eA>at*s%dK5E$u z^yv=F`Z@Ad8l7W5N%6(v)+n9WXJpXBl@J;a{6E&d1Td;1>3`-;j>-LUkucrBWH=1> zp&SAN4-pj)Kwa+x-E~!54_wxJohT9GiV$3+f}+M1CAuPUKQs{$!f7~^u*+&V)Zn5< zMF`4jRMh{k>b*Bd5?uHDf4G~Od9S*=y1Kf$s=B)CK`O6OS0 z`FW?GE&5Z3Me}IqW}tN6A(W3FoS9n!w;GEN(Sa>cxNIUC&mN+YTd}G9ehyV_2nq<_GCTn#?%1wMZIE%5ON^zB=Lp*^4ZMxtk$ znEw%x;Dc-SPpT{8qxuke{>rSgrD#ze4FO*&YaXW(d9~5K#P5zUBsVu%YqV_P}wY zTK3t`kWd9%L3uplEd8uj_j%rUPcYYg$C430f&xsc0aN;An%jVVz%{t&2d?OSHP`J3 z{ML)lhQ9}9;FHGh4E!^{64QhydJ!UvljN>?zxg5c`)6Qw{||F8O_2~F+0Mu|8twqj zsbJ-t0p9@Rh+7oLg|Vmn!9N4T)BeTe#TuMy?#G#tK)*E9A(kaSxbU68+&)Y5LIvhi zc}RkbH{5witpTFof?F)6Nx^~>eRY@N>A%Q)7w*NcsMouJK@bubyo-rkLr=d8vwlAf zZiL~plCEnE42g*BzcgYS=xf^17|4aPHDga!K7H2+uisMYxhqiVt*1@9Wc4|)3s&Mk zY0~b%dA{bO3+d_I@CklN`*sJ;G}eDW1NT5*_>ykk6Zm!KpN@jDQkb8exE`d-!HSF7 zy6^qKiN1$^SV+Zt1GQkaeS2YX*Hhtpf&P9da=3>V^6jkmWG$chUf_0EK40*!os{=J zwiN$KSG*s%uJa=mWLjP0>UX38*Khn!1?dm4XCl=X(0v~S^8K4}VJx(VJ@nKEB0TFq z2=w;XyAQwp0LI+%nx#!x5J0W(_QB*|N&3UU8A(UBu>*r{_%QG%T&dUkVcU^&3Y?+@&SRoU&MfXe?0n|A}fa1^lPnFECo6t*fAJ47tnx1JLAnp+Iva`!NVItl^ zJ*un^Tq0$~^*2F97`dQ#k@1mMTpTVflEr*8^b8$HAHjlLz z&cli~sqXVYksp2_Ua6m=Pd^U~$f<;C79KrSnJPaQ(s(tBS7vQtW7exQZ#vKD@c 
zl)GsMp+J33eGWp|*+?%Q4E!1QAdWi}7#gWz?jdYA@1Z9T1$x@r6@*BkXjd$Fpj~}O zyAB14h9h`WF2BV+v(Q%Ao0`Tnz-8Sq500jKJdof%vx1e%AEDeednV$jvgcG9)Dj2` zE?MsQ?fASy>r2j$3h~e3E?0zUw73r$qWLAds|7*>3UEtcVlr$uTt~W!$`1#MyA&f; z3nG`(Bo7OtV2wt1OMnjVt}_VTdN`1kSd6Dz=ud|OBaOEZV{|ysHxQpmXO&RIFnxcCeYg`dz1Em1IfCKQd^-&KS4LQLYh2FFSZ6Q@qS4DZv*%G z1|pr&oNohrAo#BR4*s}*(xcx6$k@@Q;V*O)dSDG*dlWm!5OS*(JL(4gfZe#~Y1Psp8M=Ssh zH2gIYv5b3dTbt-%X;yg~p6axZSH_hkit3=f!&EC9VOKcg1lGG^w z65cyWHD92slhpD34u1nC#ePi0`JQFn*V^V$vGoW)`U`DHQVT*&(8!oVxP8}yZELI9 z{S9{oE`zjMUZFda)w(m_EsthvW_Q9$Tv_gQH>rA9tB;^gxEzPtcbl5mhrC$uJpDa? zWZi@l3!%QoWdQn$9`LJ4pw{30%0j}@ycAUgC*iOZ6+F9{0T!8VxGE~(i7cfJ^JyGJ zK$|eCWhssHv9mRQ2Z~{XLAV^3Y4ex?o|dGjN3cOOJXM7pDRFtK>OKnlgpwqhWB#C( zFi*mt$evgDJsgq8Zs<8s4B1xw!3|bU7)O_-sUmtWRrxVlKTcIWlSD4OL)mF+gnuA( z%*`I`98FDALovJ$q^S$CAwRjY7h%8ImC>Ozm7fG)74ajN?e+*zBc9@q4P2n zYfz77sL95I+v(#B#SZ4Tb5$pLAyeIxm?6B>KT8eCWH%L@RH4pRod;f)8?w}G!4Em8 z&TIA#X|_dyO}|qmNE`QgLl{10(!N~PnYx%NIiWcVSJjw#svAwtQi*h?sZKTib%Y)@ z70WZU*e`??*G~KZn%K^d%wV?o3?#W%C-7i#wwh=PQ~KfC;i-NpsvVTA0(P}e;C<5U ze?t}QLW@lF&8|8XRjXiOu#MEdxM4Xq$t;Nl0PiU}xxgRF?4XK)d0?xGpt{z- z(SyOx#P*@T2-Cu;4zE+Fp-5$P74|V5_F3$g9Gq?OIn-UH(%g{BEBIc9f5|by{K?*6 zk6-~Tv8mmo$fJB_rzcFu7U=+6ZXm5Xg_HSsgGESa@Ny3bsbzGu2l&K688C^owx{X| ziT_DYHQaq()k|eU9Mtqu*BY;EqN3wu3jOxDD8zBaYM6fzHkSPgtNGRV<(5UA$<(j6 z>eXdCyueVFdBAtdy4dKQI0Nb{93^jUqMLfF98cC zMy-9-wBx?XafBUjK_s(<>9u5vaBMW%3uW0n8SbY#caBI#tU%yw+?bnc3q*RkpX!+> z69u`<9c<7czC3z3|42;@A|9GgJTptze_Qw&u{&1JxOH$3T@<@QDXUX`EKd5~Hm=K2TksCjL@sb{S<4QrDO~(S>gIG+)9M zjiU0wDu@0#NSWz8*Rxb8n}rY^9;7bEi~)fzG@eeTse{#I`gX9&hImOEq9$a|3tMw> zY)D~CSK4T$X+zXpo~JEC)Ig{_!Q<6Kn2awTuNLP(7V+u7dd^V89!bpV!NXNoy8Q%| z`J=)WM$HkVXE&jnjM8M+%R1APPQfOVmf<-dYJl*P&tmI5*i()(?+O8pup5Is+?{) zQC)y*>CR zji?!g!ShnywtL-ea0BIvJvKH%&QJJ!8}K+6ihf3*^Tqs9+vCiyJl3S!beR zvoJs<6V)QxI}r50{S*kCW%T4J>Yu5$@H>zmN}=r26s`syt4uhTaTn|pH1Jdvgw23P zYLUnwbFy+B;f_3M zv-H79svsh0YBJ7HN4?k@Og&4T51$c~;$iqzah5u@A68G^;Q}eJA&5O!#BPD3agUM(CNy_Y5;Z`$|tKK 
zUA_aC4fOVl-MWN|31gx-jq~M921OedPQi-2g?diGy!ehTn1b$Zp}AAk@HCkCa&c4= zto(*4O04`GvGOya8Ng8oE8m=|&IJElHdUR3TYev(iemyldFUTgRWEN5eK%FT#YrFZ>Zq51tuV@YRsR`Inp{nya7@DRzv=DYdN>!XIEpS94BC7bnlOO z2H<0%ThHj$1KpKel{c=cWNS1C)(8l`o1}pWjC*Ic@h47Ja!H%uKH!5q)!1GYtn3O+ zaXo~>Ob=&VUW^|X^bepQWbC+9VO$#WWn8#}U(W$nDh?|@d^!%zKxQ+ZW_CYg>b!~v zn=6fUbOCo1o6d3c`Ty-_LCqN#s1z@?{Fs~)273nC(RcePCoA7gSO4~!g!6Ie=-RW+ zzTmpclP{Wd@g-fn6VJHtmfz3(!{jMbi@Jq!tqG~P9&Gx^VR$w-Hg1q--eHj!=#0-T|Bs5C0Y1ID`70CNoRD=uTsdvdGsz;V=wSs%6aahVMFFkyT3g+83 z=FCv4RFujU;nmtpRHyvsWqG{2&8yE*s+$-xSn%MSv281FC|xQ@=L+tpK9{PKMm7s3 zb7JyeaoyaLa=07Vn*%+!jAI5|%*y2)zQdEZssxK9xz-Z0R!9q+7Rs)s*r?!( zh#-)qI>Tz(h}|?%EH?>xmb7V_(k@e7!+YS4K%pwA^swq67_s^6w9B|*!;}pH+Y8{f zzRDX*>HD}7@Yy2a7z$v+TMD#oeM4O9Qie;>vzMuY@Ln4UoFHN(SeCUn$Uri)Hl;Ye z>OZ$8%!6EUEN3iS03#DR(V6-X-h#SaGy7+M5X!y?Du@mY`40f$I$lMD?a;M`{0_Uu zXZf>mhx1QAWz7ou?Gye;NEI|oOL_j3#8MyY9XD+?zHKug1{X`;OPD`GIg^PM^4lx% zAAx#B*eM|MD2gf%SOx&>ff{CDflCgWV+>$xuY_K!r_H=rs6Njc@4-kg@8qKx=N`84 zkRC!#x*f#WWjh@Kd^QhbHvW%sge~BV<2^KenhJIjiRc&oCn8Q?t)R+jsvustw_Uf9 zLY$ZMe5fGp*X@>RDldE&IE6Ef1FV+!sU=WGR128t_cMFjrq!IhP-@0qOo6gIVQq9O zl*;7g{TayZk)AhMUBTx@{$$PvXImSvx<|xPWbi>zmTbwzVPkI|lp0(EeK*$)4npW* zLi*ilm<8g4sK^T$rW+L`8W$=>+XxGi7CE*)X7H~ooO#NE(Eg0@Z8m*bGTRh~3~9TJU_SUWZnGu5#8G2fpi&8n zli38};P4R01F?hdIHKmZUnLB^MpdSmL`fKMX#(`XA?&ZO?N$_qI)+JNwH4LEt&X^MS1g+k zux|0zm@Tmv+g+A{#}@5>-`7lt$C%Di@VBb7_cqF0pib9I-Hu`1kd9%i9aNHzQBj2| zNJc+Gspv`FAWM9_uh#{XJn!17Fmn zjHt1`c+vY_MFN!T{0@}sw(^&&7x`<)rMI!0=5@*qr{Bt~7&l!(2@$3E6j53xlm6p4 zS?rIOM7J`>y26ud6hXjNF^{KRf2=HawYR*mop>-RH`~?3MlNIy+`KaIA&Ekdtl&S3 zhRASVjP&NIDab&28zLLRA{=<7Y$LzhF3lDjIg?TSVR>5>`kelMRqI)g7G!^EXE4*bk0CTNoc#S!IP|rZe1*N)@kgv{M$t ztS!!D!4;Wrm2UGPZtsKRyMXZ z-33r|Es)EZNge1~?YhoA|2?vkA7eOg{-witbIfr5>}Ue8-Dn~ymfM|=Ivma!|8<{c z{NmBP33n_CCmwq^0mAG2FC9*2G-v$c(L6gkoXEZ%Kb)Y~e>t2tQ_2nMvJU+rbLK*1 z_wPVtBX5?d*T2-&RdD$q8E3eX5i$iIl^tpp$~3>mWaMB0{K+#{^Qlhe`;Ocbkjs&! 
zrH$L>^{>dfQ56gj6Dkv{I`fC?6R}Y|mQN%kcs0Q|O~GELy}7zS&OrVnt+`QM96TWB zns^V(w#N^|*|=~sjJQdS>~6aRN)R*vNdj4IFSA$$dizQwC>;KPKi{N=bV%)w+56P9 z18SoSG47rJ>{u)avxS}-1SvpStBld%J@QmrF|qeN<`^Bg0m=+9Q{)B)h@G~bUJ#RU zIw3vc2H?E9i+Ow?YH?FgvPG+JR-x|l-m2<|81|JQ_j<`YL!C43AF?v8GGZP2Y?Fd1 z5KG2eLv^Sm1-mUf1#&|i8YNzuKSLFJ!?bdS>X&baF71r#-QD#44Ap&#J-mD4Ur95K zh9n)a!bLbkkZd&x&W2U;V!RV(e_X?N(v7#MPFZls@|rDN3?WNRk13S^^|6MspHKzV z|FP~pygL=}gd zJECT@u+|-nd5evCta^SVuD=XbY_!ZTucP+d#yys+>-^t!#*^>Ne{Bhe#_s<6m(tjUBbpD6NRH+2pj+#HYT* zGv(}j$5nMJEk`St*?l<@|B72_?WmPjX$3k`vQ>h>MYgVF2hQzCDK1%zlIssN;#Spf zsNKYZ_*Yy7(!}z(#FMKcfd^$KExHw9a2sjMt*ZMk=G&Rfx83Q6+ptw8k`-rdgmJ}L zC1ysV&7m+din~?X?PEY9W`@YWN(K*%%AiB#<{z@g0D?i6J?f z)Zz5zSjZNEype7Mr1wUee}~EoxP2%|WP;zo#mo58ChG)RbB8K8L!OlL6We)L#$kqT zp>iqoJ1$fqAj}9jKF@}4mDfDPDTuL>R0!DhRCK4xr>r|w+JH)VGMx)nwJnH5q;A0~ z0lSWFLc#Fz=<{lMek~WTjTVn8oz`Ey=3&RZTr05GFf3nJW;1I-#M1ZR)K~tG zoGu-X)rekiy&45>72qovxC9{)E{UVCHBs1-B<8cr8MdrL*xD#;nZRDgu;oeNjw*gF z3S2J0mojighZV1k!d3|EB@A2HA?)i>*h+!DSiI&{9Rjb40#^y}MO<-php=x%VXFo9 zLWZsF5O#GGwl--vZoy8!fPw2e1YQ#bu9J$9Fb`bZA#7a~wq9U=#jp(>!mf2;!*JFM z@Occ}*dg#b7r3{tQDDzy*rpC)*GFNS1oj+;ZSD|u17O3>d~O!tsSMoGA@Ig%#VrCm zg<)GegxwT{Z57zb3|o@i0k3to-i!j5Br_L_SyPrwgJ-L}!g{aS;>f!)d3zRDRqnjq z9DQ3ZZ_nhn70%l&(YF=y_6&Yo>Ac+Wz9d8u80& zV)USs;%g%czb5MU{&u)ot#G@waDn4Sc@J3*C~QkJ+@q0lLE@wADUXZyBM>6h#e;|=%kBz99yV18ib0t& z74SnZfwwH^+-MGeZ6$tMRf0)Re5*vjo2}u!*scbv+D3+Js$xg8t3Ff`Zmc1SPkk&t4T6uwAdL<_ zQSEsXlRE6(>yNcU85S;CzZjuSP8oOIbDezq<1pYNT&M|MIKW+i4T=o&kY<8B?=|B` zGw`?frV&#Yomk;?!LBvhftJ_~v`PoYMfj&J7vc@L5P9U83&kd|jnC4Q*iJDN(j%lu zr^;+-hf(0Zb9*%>g{Itxt;I4VZmWQ6iTkmDKO;j@&JL$aE^ig#TY1WK^9zYeSdtRc zrF226N}3UeUS&!Qi&UkspdHJ2BWjxQwkjel!b1Yr;)*!z85TM3ZR~5~u#Y0YDh>lK z!i8$lm3pTu8{)cB$6aZVuJpmTx=2@chTGL-HAef=5ZjkV>B|`IOH&)XT+3;HTx|?0 ztOC3a#NpG-__S~(VT7H6U4W2d!?mlyYK`L45{pl(;Byk=Qv#PP)7E8O7oXD9*pXsT zVU5&b#++1psL!{zZ_4n~D(70@^oNK-sHkJ`wX4CZkj7-J%Tr^DabYtIvP|-8q;|ozbtcJ=0#qX$;$!!vfO@I*oXzn!M(RnN6Fa4i-eq_0nN`FUr%8mvZX z%-)PZd~IZTYZQEra|fzSRl7QDqUlSNBlSgZ_bB6*wJl;PV`u6#)4-oQ`{*9@#% 
z`8y=<5&n!|--2IO3HF7(7S1zREx_KEyrpo~Mb!yGt&G8J+rGrvOc#x&ES;ck!G9v0Bm-5|8XN(;ih7~gV1#E4qvj^la( zBD|xD6oVo?N4k)x!YSg4E!Wq*IIf#qgbP(*6QGKBv_Y>W{OQswY*ezsSSvS zEEp7QXpcq9G4MYUhXEJiLM_1msDpoNm^)CC!R)%n?m%m#1Em?U94p9`F{Hz>d{EI< zC(AOJW4kl{Ygc{=NQglyq9Ej-qcXx3@~(*ARYu=ogEBu-yi!1d3{n*b z(kTK`B_Q1xq}l-y`C}_uD>7o~$52pJa4jF8!?iYeWDRx|tyLYAsm%~P>zpF4zuG+< zxgm~db~AMu@zER{mu>ulZ2($2Pi4_f536y7$JzyG>rxHZ3tsK5qnKAZH+_ao^u}Y*W^YDpfUI4lF$0?-Xraj|)9UcBOPu`~A>OM-coRGG1sR0Ov|>M= zqZ*IkQ&e(%LKNF{F3Dt$_U;v1o!CSoXl8` zw#$@d@|BV?7W01?obq|tsM$In19Rw6=t*1WsZsXmK(w{pqiHuf<$_rI(J9a5`O}3* zr^4xQ4C^kA>ncM*qGrnIR60G4ieX!HEy;`>9jO?U4%hO5GLV|Dx?)mS;S;%4GiB83 zoR%@e@D1&|!?vM=_Ukfb8q_=OyVJlOoo#WHwo$In?E1f&2N#U^04{@L%>Ws>pWO#* z5FFd=ax@6VHB-FN>30mp_r~>Dpg_JBre4BG&jF?$=++3RVMY@_ZFTAZ@8cV4az)6| z%y8^TxK)+Ie&VX;;FAZU1fSS7vj+m#{yIEgf^RVcu{bMs{8E$2jc4h7mtwJ)*s6xV(!3@j#UyV>YH9g7`;ubYpH~q|5wVMxh($n_yp$_L8 zJn4!@r`{%pHHUWsWQc5iavMB7QQd?=u{381BR4oj+D?j{ZHrunG7UDJ8rzg{d4F#l zmCy`aT)$x=VfeHD7m{No*^*F)TUm!PzHywjcO+u#t)2Z3+fK4KM$vZ$#SE$@z zvrUy2r-!cl%hlL$6BwY*ZL#UldJH-ojiUp%2zLEnatjVW^7gjg_usHe311Hh)uZiR zVTsAC(wRw6CLrxi={aUBt1u{na!iq*8icsYul^8C^!Q92zeC8=D zN2M#dy!D~2I#u9eUt9NE(zZBcs*QjAUTc*gU&zSw#obuwB07lc2UrnmtZ+YggK8u#pePW8Way7clmXHYTy$ zw>Jjygmi|&lre4n;23!C@1RH%t_wv2&0GYhaU=9`C+L9=$~2pz2DG$kCfzY>j>L7) z#;ir|r9QSAAWn1o=rPqPCSF5yfw52EnC+Oy)!DyX4QLg_+VgU&5VVsB89BFNm1GN7 z#L~4iJC>UP47!$N3z5of=m?EmewY(qBrIjhvV}T87n-8(a(>49r%I^UB^;7B=J9eOl6FY#pR-e~y>8KPW+S9Nyo8>`%3=ON= zpzF@87vp+}5+G!iO~~ptMO;Fzh!hF425Xn8wh39=ri@F-Rgp5$BDFRl>)I4?3ArJT zkajC|@r2Bl)15yVbBptriD! z)Y8mXF}U_az(u%F zNeYP(46P_KyXAug%4NmUI} z)iH_S#$IKUDhWGUXVfyG>V;5!@vSyOsBP`4u^MbTar5=KfIBkg4LLmVv#I|xO4|pz z+R*EPd341y|LH)NoP4)W+qOT@#rH*rvtwx0cnn(YjYAtHYC2NooLXiRJ<3TV5PO!#NLqIAc)G#VyiBXx$Y@aD? 
z-yyCQ<-oP0P+`|u{ENlJ&`id)N)R#HIrv|U!PQA^6r1>FeHh6%`O!fBaM#|GhEC;XSW6>O;bMKn);#F&0imM z)7NvLi|nm<49AhnC!fFap5;W$;H)fza7=bWG>!&O6ZoD)eue8=5F(Snx9B9wbB>Ea zek=}j#vyGCy4-_VGh6e<}Ah(R2EMqJdre#G*I7OAns3%5ymc+e66LQz<5TL-v z9KEc4@QFp-acfLBH)qiYw+A{?*J@mI^A-K7TD=u+*c`cD(__`+2Y1czK<;?>Qr5k( zuXnb4-5mS6som=%v9AxbdtI_+VPE%dPh14v?)@C(kpl93Uqa4?9`o1RD+ui}cIzwM^?Yt*Usvjm#|s=C1Vd^eTWqPlO|R)>ggySlBlsIGJm zeN*dJ=d+8>-b0VQhN6$`X@d!3OYEZiUPIBR_E5%3x2VBIaRdHJ+^O{R9$LOqb;6bL zn^rQp_RyCrUF>i~!iDQx&-D+8=_#KYW5Ur(ucO?zduaFTLZ8qo)jjF{!?>jLX*y$- zDn8RT50K#u8Q_xnDh4~}lO0j(xbXS@C_b^_pPt6-=^R6cf-R?ic#?o+6U?2oW|iuS zTXElCrLMyrk*B?({)P+izJCLEH^ts;i}b~-abM~axqKB=+IR{c&CjE1Jj!_PP#JE{ zw;sZW`AALcYIUpE*i}L|*Qp`KmnF1>KWE{p$U60scPl-xR`s1;nkpCbVIpEL6?=7j z3Yyc8a>BVKKb9f~fJ|lca5m3Eo}#$2G%<|~Z$cP(iZ}(4k1sLhxsG&iHv8iKkD>(3 zTQBeb*#8B6)O8)s+kRX#%QPEO!7;cT3Jk!l`S=i&eWSKnl7erXy@rp^@^#d>{nm%; zTk$6t{XK%;7e;<_5-6l}8pQWq43MLfmbZxAJ%&ZNJqhjcJ!yP*)R~4Aj{&lA1U=@* zKC9adD;bGqq)-$DR~DiszV92t2>tY?Z60Z)gcCSNQwue%Q+>Rtl(t@t!N;WaxISu?Z_!TZ-Y9FE1~b*sOoa6o!h0E*yz4hyPVG5go`>~ zt=YIqE${;D;(Arq5!jJ>bs;a=&^f<*rxGqS6LDXi6ZH1R-6L5zidqK{ z`YEm$O3Ahi1t8dH+<=8i6oR6g_DF~ja-~!cxT?PK;EHwj6(%PI*0)yc0 zm>gWUfJF!!AdCWv#2^Ot6~yovL9F!A65QHm-e6w`k|ju<4J^%C`>I}K!gl%OOxsfj3rg|kMn1911k~Djp`rUvf=>I@3l42(cBOuVVFdgO? 
zfLjuA_BF{3vB#9l!rkA&w^TPj$d!kgo=T^_rFxv3_&dy9q|64Ta3;EPSyf-uAor(fYfZtfS(VJC$78*su6;6TV@B}`N- zO8I8}BnzSB=4N0s(AyeUF-&`5(Uv6ce+eU1775~x>-U`G7=);1yBc^&d2$g76dMbY zWn)WDZF=mrq6n9|6(z*=8P$U9@;8c2RdVzSro!~RM2rcz69L@!zOB>{7Z}8f&?OcP z_DrM-J988B}Lp z=BJod65|;oMNtc~zRWDbp;_jip++@T>`*?6Zc z%Z@loBQi5W0O>!XIo z{fPyTW4&BCh6^YiOlrf9@RLC(a|BsQPEmrS{lh}t&oVF{c@sF>Be?+41v`XQ zoP?0lctRo!)uo$FNW`x?4?@C8%|b~Swa6Zp$c05Vl;EgRvbmaG`iDC1()J6ptCy!s zImDepIf6hL0PPLB2j9o z1G0p!+=;tPQt7Up5Mh;6wG&sSrqSE@2*a0H4bEbpVikX4)En?=HAX7Jq{VlvkBE)6 zRfCq)Sk{E8D0i&lZ&NCkRnUfonQ*BTY{t_xeu`xdPZ@hellp2?gIP;&lGL8&Qp-1k zFDvvR=L$qvDv0Q=6$#ec9$Rd4TvEF=1Mx3O);4az)kLf$uE+2tFkY?P4H%N<>mt^V z(!PJH+;A&85KOi0Gys*PN#YT%CWtK>^HGt*b{He5%?d2j7IC;S5(kl@@qAK|=CTFb zeSj~IK9CWUu*x*7h3-Xa=q45n)MELr&rC@0diXju5E|(4>Jo=eY<#Jd!$lyX9$SeM zRlH=3#}gr-@p#syUD5_o1g5$i&WKhEqReuUmAQd`U{D(wKMo27gUDZ|Ja#9{mLx}N z(pm4Sg8uu{AQ*wBtcmUelPovWAUC1>y{r11f?!Y7+JL`N8Q7W@ z%V@wTmGNs~`~ZtUQP3$PX2{YPHmXkH`2Hg%GQR%^zI1!*j8pW1jFW^DGJ>KcAV}N% zox6II`f72+0IiO4>Jv_{8|; zN-`p~+mu3hV{CIRLMfU5k-$Z(>}7oteh#2^}PZ0 zGi(K}bg1OEI3kIMyE4;V()VY|ef~)E!}@TV=uATq$iz(P2c5vu2^P(L8=;xaf@mw` zRtBsFTeED5MU%7M3r{yoamcGo=IOL3Pje6xTu&G~VfBdm6VK!bdWxqjGM)GKbzwi0 zc%hhR{KS61tWuIillG{A;WU@I5%>hAMr+Z=u|<_-D>is6M8Ic+=d+?YN((u`mCf>q zslgY%0IZHH1W8-?lLJt=2E;5mno+44O2$R&t$UOXOBf0WfyD=MnJ-J@`Rzoc2-`AZ z@H^pYe8&+&65nxzk+|J-W_R>~%x;MiNQNSYQGY!tz#1>EnUU!t>ozcM!dk(&R6C#8 zSi;CSJwT|5(}SaC48uuouc1b(DY^|u9w3~W4Ru=&MBG7)9*E|_WkV5pFx*$39ULyK zUB-G7M+cxSa18p7&D%AyY*n@`Ej&{}r@yCqW;i?aHks(I_f)To<7tGA@^~6yvpt?h z*s*sXFpa8Y131~4K$gA34Hm@QrteXI8wra|=J@&>qt)4CfDQ22`t7apBoM*cAljA? 
zUiPgWeV97^v0TUIefrY*bh#Ig}kR^Jw`8O4E%W zsI>p4L@>D}hn?Z?(mNlh0f9Ob(<(9OvArVf6+%8AF3B{z4_lhRQtUpME_Ff;5PNo{sj+G z4dw4wQ;@OzxBKDhTSpJQ$JH%Tr}Leyu*5-^pd<$$33Zy4m0$|pjIu>F2qdz76Od>rd^c7KCNhL!Dr90F zWFo!)kvd`ET7+Kl;3djsB8J@Q%bjIEK%!NGWyz4MJAmc}pE--heym0V_q#vF_3&$H z<;Tdn@;bfyvFg@y9Va2<3-lc-RzXLzs*un^)*q3VxEMx#f)Z#}a8HyZ8b~jV11g=F~%s0$v<(62xPh@o+qxJ^1e1aT3 zFOzOY9)tBXq8S^XukD7{wAh-54|8sQ+Wmz_vYmvzoNj7XcZWBW-)CC=%)TCHn_vT6 zcMkt~tW~Zb&%?R|{EwbfnE63Zt05MyJ`T>_Zis!qGx{E@wBH2ZaS}3gf~J3pyEG2a zj>oh6)YN>cM)|-#UmZ|qcweKjU#M>jHkd5a1-6=X7`{WqFxS!KgKB=5`^|3V5WAjj zVa<1UuroU3?Cfw+VejUIAU}eGhJPcT35qd@7cB`xKA2%!2JG&I-3Kur%!F0Q;-2s4 z;AGtUlf=ctbT`80<|M9&d9)dFx{?8_@CM5?zXzvgEJY5prMRc_H4&iIiP6|!KO2Ww zM+Dw3)hwOcBNqFrcu-s$!p+pe9>wt@X65AIuULVC%z~2zoDIf+#~d|w!&=9fU@K}H zcY#l3@+Sn;2L7~rSH~bFa2#>w+d4QgK9y!mlPI%AsSzb|oL@+fo|Pp*oY~7i)&nkN z9`Y@2{}?AHxrZ%?2o!^7`W_(wHh3 z0iJEbd|0+*2635^AOc!kEu^SxIBQQk{W;|eSw7DobdY!o3Vl7JpZZweUsT#$-wN&&KWKNy5 z9IlONIVqSZ6QZ6w0zVl3*5fat&!=0)t>*uL&jxxMWBhAE2N0zJ&)8KAnasR}WB>Ss zYL8E**-nhze*O-AY2tV6IK!zv^x;ehVd+}3xd}ax`2y%8PRS;G$7KR|-XFC>p5izO zSHR9$5E0+tL^B^0QkMCt+YyzY!S9%s%<$!89YOkzMKyDeAOT0g!+w-WSvK6)gu-$q zdMdnGYiR4g)ei|j=A-hs5IVcp^VA+NcX{_>r=0txqw@71%N8oEJj zC-S~bUDEZTq#is3jUHP2s6VfWsaE1;naVyvdo6F`S-_Xv$y$Eod$l|jH$HI65eN@j zc2u1=^)L(m!6Nv1tI~HXW)7~H{yp>*a3ij1!&Cxzkzui3kUV21-1D|R;F$BS$_0Q&R?RR@jXg&);diTC^8D9WIDhCa^c z4Q9}`pWp(@Ko&_gVNeD*#dH0#vZZ{11Xy|oSI@gs{_A14c$UqEh0nWQ;or}|ZaFlO zc|Lu?qi^WWxwDx^{L_#3vUrD%1o0kiqSCALso1O2Q@9eINSSn!S9kRxQz~8VMGC$Q z0!*)gNGrcAlsy|+*EI(|^uNgjXDOFKSPg>>$RYCvH| zFdMaw_m*L89Q*raWu>7EE|Wp$8oHl(AF%7~8^J3@Z{Hxh_x%QDAom;h*ZRpb!zA1U z$Fl-S6#ILSFP6IwC#@GLT!B+2SM`|zlu?M%e0oA=d}+GOr@M3#SaUXKfQ>-~Ir?-T zn*F)z-FY~u3Ze%A)1H#&(>V$HG_1alC+N{o-Wn5hKd3B;iFznBpHYeW_hG&=O_p1B zpV)GsK^}Fm+}6Wi)n>VE0c^A+lC`?aE!Nm3_6hKnYrL>v5K~0lreQb3kCaDfE^XQH zWM3;owNh4+?i^l=41Ttzhn0=j!Blry^zvP(Ud%x(zC|8M?Tv{68B@XvgeZ6Bx|pS- z43C|8Q_c{$P)!zua#@YPXHB6JU$OAvFY5sFF`#mfG>8Cw4{JC>;0cyaNwb+ zE9Mrw;=!H5e=3nBnT=sHH(lxRe@8fBOdfTsODRlJ=i{lax-J|t`exkMRa 
zy-YOc*v3UsifYNA40eY7*!LC{Dn?7jO=C97P?Qt+zGNAD)_~-mF|2lipS^51B!-f0 z7+Y{QGFX#f;aoIkJ_mYX8-^QWsmfP>8DcPCG@BA-=McWY#G(vi9RC+gI=;W>Xd-)& z4Z{NsLdL!odRr}11R7>FKIM8V89BHUQq1*~kfJY#Ao_KR9)gdBDSB|f3=8WH zkNy3LtXhCBm^Y(YJ_Zx{ruvNWen23xsqT$}T~hV0@o{fzyJ{ZzkJSJ|s>#h*Dxw$a z4_sHQ0$O!7XQk;fU;29r@c}+whf0!ZU#3163dYbZeP*|3=3_M(a!fOVDzo&6fyc5L zaJHo*n}L!|?JOvL>2gzVp>-dsbNvXFN}h!@0W>RHkL&FSXIn1APmD~75Y;6|lV^=D z6scWO(wa4Eyni-*`l0Ge1vxsSZ%!G+d0Ux|V&2(U0U^IlGE7Ae7GEIK30v+lrBEiAT^7ow^KzA`VV!Xr^GhvJX^u>5k z+&3*@p<;~i)-t^8DQuGvbE0ow^^{wPv`38wJ6`1!zwI9)J1g2`}o$?bV^4gr=<31+(`W=&h` z0aO)3CfS;1TQF=s$e|XD0}K`PTg(R#xdZb7moCG`$>s*JW83+}i zTSey<=%PH#a|FyfbynNf1(g+GX1+kL7w7@WCfKqRjNF1BUX!{NYL#H}gE57=dvD}2 z3npC8%m__@r@I%4;&AHPwg*fwH`@a^c0g+ibf2HEEU%-=Xk2Hl!*z^honQxRR!)`) z8`ksl2I*Tp{_@tCYagJCjY-k-OMW>S0Q&&XZ2LgQ`2bhI_5nVbTXy1P#eV*d&nAAy zMx5`QAZ#!hJXV&hU~5h}rt?9w!z zfjr-;nLBarw+^4dY+4r3!Juh3cYNFCvLO;o79?K;wCZf467C@TTe%XP;;!UMOe#{k z8+gD{T4i&)m|&JM(o-UDYi>|ldtsH*Lt1zDR@N-n`edK?K+RX(^g~{_dcs9|lK&&2 zfcYxzDAK1ye{~J%Go!z53F)4Q!m9}BZu!fSK{D)<@~u}+^jq2sv@N7hM(RYfyFMZ5 z8YtUVH#)t$KE=D2=5^O2yep`lf4xGPJ@obXxU&Z^+DA|J(3|6a^`=*P>c8O8g}rni z@4lK@z4YT=qh%Wfiy;)Z)5FDj5I$BH>#;~z{9~~$(3`Pg!$B)kh8Y(EpC-;)0W$}B zlvzQ8d+R_Um{lNd9%kH*cO|fJV4av->6!_;FU{?ZHE;*j_SPc@>_SAYtsZjM>I5)> zlWm#%;@%*WmbGjl1^Z~_T|=Y$=z;l1lHHrjaeRS~HfPdXcr}fp{^4d4Dc|G0LSNGteeRb#jXdBQWY&5=HM{lDn zQl%g4tM5am(i#19p|^tm)DI6g(hL1`a9H8>R^l1a9Kx6*tkHPPe~8FOvTzRNM6{tD z%{hqamu7BX*k1-?aDQ!eZZO^Mb696964@pK$S^n4L;ZCKp_MQ7*CRpZPw^2$X1TRn z$PD=#M`nIgW|Mj903B4(p=no!<_ypmo&7Tak+E|t{c_!(IDz5QzF4H~;kNPkI~_kz zUz*b3natXg)j$;kbx)(Is%FhVon#;r?54rs&)sx*uwG=8KT30k=)ox~9!JQm$9&#L zGluBS1{h@D5Z&KckVS)!*PRlJr(0VqYWg0p;W_w>?l?jJA;aEoWIsL=y{-6c3X zH{^@3F$a{^Gso2<-*Vl;2hnj_JwkV;%HcZAshZv$rZc&kPllnI^$Tgz@Sm+_bBERR zv5|k6_72xwxwfOj^;O3G%W2vO-39Lcd(f&eXE`kzp}QI7%W35Z{bytO^ECNHFytY+ z_C)<#kaYWrdK{*>ITFj;b{acUpH>LwO1AsMnlUm)GXpYjJvCD6?21%81M}c72t%n^O zd>hg7(Ygo#J4VOV$37OW4*)+#0U+u|o}~My9A>`rf$ym3B=q_3^x#RDlG`bLjLf8y 
z#^^C8L}wDqAoz9MndHd1(}uw&v?W}ET%Mhb9xopQ@v)T}#=r{SK^bG?CK=ZQ83vk( zcx)rfKuv4uI{L#{NaTaGXsjM(e6)$)8LNXa^YlMgfe!Z=r-L!m@IRM{ki(sZZHxS! z9vP?a0)a9ueIm1gvrWUX=*81RaN`OaTw#esyB$X@L4-ws1rh#X>EZn$6k>`nldxZE z-1SU@m=~Zj(_r{`4Cz6-XuM5>h2wR%GyjVkqTPw9I;PFPllNr(XY>ZE*Z^OWDeKsA z^u@`*-d@Zyc`=j43zEv*O?@Zm07Uh~3DPkvS7Xqz=O*Y8Npm1;%w0$v9k;MB{UCd- zKd+<&n#5j)U!uBFGlo}`Mb4H@CI6Z;rHqJjGX`mBrNr(w8FA76&9Rv*+ z8-@fO0Vv3lawGlbR2|H77A-tQ>^m4i&I?qW3Qgr7wE9#?ob9ytR6Urv=QJoM4Rp?F znBZIK-qZ9+Amf_T^hoa1!;^Gj0PK+9NEp<|6ZoY!u?3cZGYwCjuJirSCopdvXk%YG z%{g6nrTb3TBYvie7aoPM?YN2#8vK-HDr5sUsN7na+HDZSdOmxStmn^7((97;rWYCM zUV6EttIFDqU@s;itF(zYJW+9mz8E6vt25B=G76okyYr%S>Y2L8z?e-x6G~JWH7(Yi zidJq!Z&P?djI`#p_Cn6uK4x=2?K@Kkj4kWw2QCd9f@gt>yXo??bRZW41_hn%M?Xf3 zziy<5&eEZA@0+YKV2leb9xdLhU}vJZX)MHmnGOJtt4w8Pgkt{A={qq&zBmg4Z9DZj zTc3&nJbaJNquFQc%;4^H%L_M#!!Uork%SrS=SZ|#fOC@h7g}+)J`Q8O7X^(SCOz?z zSwN#F>wZkwmnZ97`r~As#RJJdGEfln6e=AL0lmBk&8)+7WiUPf=!`BkZ0OC7{cvTd5%5<(uQ z=jsWJ{=V5dU-UB8Tj!svZ}K&7Dx(LGw5?$a^Zw7*urj`pwl%!bK@A~DV>?Z^YuKmr&bE#8{k*uQ)7tZa zbhmr7pZ@tPJ;?1l>GLr>AJFON>+>;6mFL?d^Tzr5Mi8*y1^Re=OuImjo`6-xaS_8& zAm)U_nPOp18)jUK$6&J%tHi8|q?pYAEO%YL-Azpwz%rdKVxwQ-+5QpRK!5*5$f@^%!SG=G7n49Rrof%WRAqz)xU1@-UotxgJpTK(1Sc z@rEcwc0MVCzfc#5h3+5fP?3m*gI;unzM}tw zxh(tHv9;gBRK~78w%a6)D2~*DXza`22eLGFou+#jOIm5dH2o|F;InD^#D4JuAcmby z?P6mP9CG#mSljBs3tMUOmHP6~cW{33U@#G6NsR3aLOejf?N?$AevSTprS5I4SWLmI zv_4_QV%Aq&1@4p#!wzC$aScZjzXY#g>~5E1gDlzW7SqjF>7Hp(YI$kKZ*_oHUZwjM z#uO=&;pQce%lv@WSmvr<>xE7)ySMM9(p}VfuUA+tGv8#2l@p&q3xLOYgzl+I% zC|;%lM-(L@bQjsOXG+MAg(Hky8>b(SqjLp=gJcZOIml<)_@K8huA2qFAFBc}bLM9{ z!h>=(Q@ruFSd8}5xNCHOc!F-cM)&S%t0Rf#LF72Hy+ZK9Ax$&b6gq%%97Q{>(Ot7~ zT+%w=WiAfE6@e3u3$N8WCGo5bY~})%dS0u$75PqqG0q99y=UCQ6h^oOc-%MBf|`hr zP$FG@t?tno;c^HTvkW-ru-)Rx_M%SSuSul8T&qLLKJz%{VcL4F4(0=vr@j|)b_cv1 z<o_YtIb$BPedMk9Z1N8o_a0fKMP2b)MJ*tX&-ll)w4HY0aWiX+?CloRi zLR@f4K8W*R74vZJcKdx)bDJ(o;~!~7DOS_B3+Z3C>CuI<5+&f=s)?i%jswrj!eIFA z@Qyr~i5yY-B{Y$Ehdw_8W`~@vW&LP7U3&*C@7=WM4rpQxwDJ!0w 
zx6&OYFxMODe@b+Z7}3h2w^=l*L>^ZlP@*4Dkp+a?j04zH^XGeEDi~jt(2o0b9VGuF zvxIW*%)-d-rf+8HYw&SJDK=8@t`yG5?etcuUgnw=^X|vwEu<&z*G1+R2zx0)fK39U zU_G~q-o9TCU^=oEP0MHNL@J!Ei?UzMU`Y?bVgqj5LX&3ekg<9_-8dVXoTGM20pB)y z&{nsb?m_W?%+@CiZxt^ZOInAJd}v&%G8f?f4BH+>RP5Rg^);qkf`@g4`C|}J4V=1qbbE&_YHBZ z1wttj_y{CmIJyLn5;zxU2r8l{lxx=6(~{mT(|xZi3?=e3EjDy0NzNzuA(z=iK?-&3 zJ#mR_Rz)1%MRcvX1sp!mdxAvZ^FB_J`6>cogDIoD>8uqbu-1orFrb2ja?EXfWgNbb zpc^05ee$66d4t*JcB$e_y9%m)P#2sqj`t%Zh!auMJh9L;hnv_=GA17!u!ht4c}~e6 zQL@{Vi86%H2L%YO?T2yw%s}s@4$zE0>D(TS!-d0)X#lbL()Oio6+~u6A@hdv%AFt< zLXL|9@0VOV+{e2QIN1K-2FT`Q9(Vf$AQOy&59v+{+L~?4Y!H`>84Ca`@(NHIjO6&P z+my~jX~fOJOVPI2$tV`J@T0}r&>n#xJWhfHRACdl4N+Kf+kcqNEO<5Iu;&r*UP~jn zih(U+YG6*`b4tv?(iSrqk5IjreMXYb1>;Vc59_YO5F9F=PcS0`kd4EEoj4w<%TYI;~jQch(5r{&CZ=9=pW#X!P%z_u)!ddfR zJt9rjJUyvb6nQ@*k94SQaYQ720=+O#pOo*gjT49E-mX`UXDTgP3!7xNXN zuyz{E1Ujo+521!~81W0sb&pt(Y8M1=htwp_hl!o^R4H_m6KUz6^{q8eJ)#jPxvP|( z`Ln)rtb-z}!AuS5HEgKZUW@3Cc16a)E1UI}Nmnh<=N5t*(fQ`0?*aM^3!utLok%=t zPr4XNdLPx@GY{cDc&Ec$V(6oK0MFm3aTinIkB{n6nf72gSw|DdOQ6AfkLs?cpj1?5 zL`${79Md<_Q3Tz!M+)^#=%7O#Fk`f;RLd1b(RKy1c$pG?*&ZpItuX{mU`U&<9k!N= zm?lw~60L{^lJ1Ocq0>npPhrx1bK6wSY6q<+Xb;=z2f~8BnMPE=x3GrptF z!_t9BcJs9yOhe2{KXe)E332SQldlu~qXI0wkN#Z&&AWlRErL}Az3ucxx>u5|x6vOK z=^m^DI<^p)Nh-n4?XpGC9ABdM7wIb!VFQ|<(fG&oEvV+o9XdZT9UBvG()P!6F;tw^ z$MgiKI43=Bt5@u3%YsfTvdfRv&fIOXik3^yJ+6ncmi6A_x>vBhmK9fK4|QLxXC`ei z(Mk9$f7LaYsu#mpusymd5r(0gS7&yn!Y8!$J-GgUntxumT$=O*G}MRJ)Adj2nJ_1t zo`9|K24y{|i<6&C<=I$B-oK}GrVF0bxrpk#{z*MD8TL6|jG!4$YDI58sk4(G#sCcT zo<=Wk%`T*Go7l0B2CH9iBDs^w$R-|i;^#Tbfra;Z%6nPQG+@5`>19+tZ{gqch{Ez*KJJCpj8`gq z$Co`-^JvQ7pzGJaQ}f8*^k!eDA0NTB0txQ50tSL%GA&dz<5dI$z({=+fq~oT->+gX zbv_NNfeyTlE~o*wZ=*YE;7%-~MKuV-n@?ZV=%EQ{)1WJBU@V}WQMJ0-`zal$)q$?> zKQP--HtdOsWrkHWwuKGzLF)0E9+>ngQVbqz2V&N1`bzyCSHioVQJt16fZ=ZLTDVe= zxcGI){~^^{3mogh;|Nc=7s-Al%8mUT769T5SucP}1$)(#mT%juKC5XhG?d-=V?Ij@ zR_e1myV`+2gwrf+UO~6@LN08l?^lW;Fzj`FATr{0Sk*g;Ue`m6&y%U!YKVw^ufzY} zVDc7z9?jgIpXW~yE3kAUjaj98>A&Gpj~SLR5lqU`*H-?ZX>u>fS_iXD3<)Sj*7HP9 
ztcn)Y4=}NuFoUbtz;}op{fE}*H7Ne^qx1vd zx*m%p+;8-3Y@I%-UkP*o3n}MKbj!Bbc`=677cl~uVhdUy0F%?* z^1V>;AZ5RghljSnm`Yu%b0A*3tcCb)-bCj6=)x6if!7h5u~uK{E3dzw!aD%nVYkQA zoOS5Pp|v_S8CHWsi~=fIr_&9Xvfg#D`JMrJK-V?vu&sEA-dm>!``G)Cy9tYKGd z!W3zw$2RFcmtj4$<4Z&``*_G8ke!`7IG)XpUPEGF$axdBqqmX5dw7TJba(>NdV35A z{{9{WhuX8tGtlcX4^iox*i>0h@4g8y(q{VUO`Q(~AirKV3eTz6qws4k|JqGk>-DQV zcV)wG;bxtMb?v#$I+XK;%t_{Op0odtwl{&3bIKb3>#6Fcy1J|D>3ysFbaiDXk=TOR z@`MC~5ymoOytA4a6KiaPL3I{15;{a~5QGV71RYy9f(}7v?+_-)m^NdHy%8ox@cW+o zJhgNZ=6&b?`O8N?_1xz!=bn4+x#ygF?zsu%Wc}K5EOkqDVg&Xoq4>nntIxb<~qywoGb8vVPK@xi%f11X5vVBSj4 zY|ypRGaFc|*=$V@G zgkdikvleHu*Y(1+@c{*9*I|S1kup6f3SODoo^+Z0dg3*LB9izHV|GkI#hnWU^*Xkdwi$`^AT|8d^$W$V@o%JH3&XMclr5?<}&GmsV zdQ;bYyKx=kmCo3^E*@riMf@|<&}1O3P>xC2{n7`phRjD8#)h+PdxLh+6PnD|AHK+z z^6UDm7vuYur`1Y`ccj&f<91Zv_Z;VS&Uz{S%NPbX;|L|k4lsA8Hq! z0tY_K&J9b#Z(j!eu-z+OM)KRN?|KvgSQNK6A zkvB_@8qC)tHpCAM6=6C&b3?rTG`16Erpv~FL{#*=Y{b+gXZ!Kg^~^+KrOG6*8jzfc zxrj(MER@(QB*HA_5l?~;>6<|~O~OI2d=(mAsCT_2?hH?Rnqsa+1rqB-GmgzI!f{#+ z=P3{;4Wnbv)4j{%HN&N@5rMh*7z2;A#?DM=z7&93$ozrXSu!6B^xrtcCd|z!4u5(z zUOITR`Qn-Nh@R3(L5W@2E)~dB_ZKR&tN-+?@xq;iMHow!rz>WJb%d?VGAsojeolY# zYP_LT)@-i~EOWDNdd=q{GoR51ycXZ%K)N`}v!2N2tZe%v!bdh4lOCAQx34KU z<{c7E1?#D2u5W2+&RAL&1d|~##bC)542ltCq%~U7uSz_Xnxa|hy`Taqzog3UM)F#`G>0Y;a1|2m&zb%DG)Lg9xJxC)_y8E&k$d~I$Tcw$zACF&FSAXtY3r>;J%5W=+D42^ ze`P~mOKOom$-4e{h z`lU&Ds$|7Q2?1~zQreg|;ti=zFiJERL=QPZVEkZvGMKkc^#Ul{tr#240IcsvG8`^9 z^R|asv_YhoUMb3RgN%uounX)fOXb(5ps@h^Mr^MfQTl=@BFag@7CM_R4pO48v|ya! 
zLLR}zbk1evxidm2dh&|Fx>?Y_fem-AN#Lg+hd^X~(vkL=*OH^l6vXxf4&^d;EVXR5 z<)6<+Ce-w~F{xomN{KBK!-r6lK;^cZavZ=+o2K11lhPf^@I9FlT_G5cv@;)AIU1QL zUTC9YdoC+fX+GH8C6`+_c{u{f!8iN5wDUx&e_lh?1`hI~=5H>`%7Qsnnz@rkep@KX zCU!Gumw3Fb@`TS|3&cVF$H_1ksY#O9XTJN6vsR2UyPJeJ7!%+GAy=o9FxwPz7x}%O z&Z{d&p5O=hbPrANaS$feZ=la3s8J??G}9uwNxI&Rl1Qs=Gyh${{`J`cS-Ye{$Uo+= z5?l} z%l*tL8KF!wW`cAsMOKT5Hok4<+`QQ-Mn4c#elW8)^Im&~ij5Cpw&Ii!kVhwJDE^C@ zjpaOB>GpMgFVa#B#C8yD%dvjAZ?c3o%j9d}M4U8a237*S*k71shQqnvH?G_-N+A}C zyv*z}u}C1iY>Ldc7S8Cl(TFtGnlB3xrGz*?kFioZpBvl4984xTS$(&03J= zi{rWi#~V&%*_&;cRR&OR4HU+{dPMvPko1s5IxM93TX-DdQQ0p__idUVI zL5`^|tt5IoH{VNt{T3vdE!en!C^Jp!58sN{r@YQzCkvy89s%WtX+7JPXPNoWO;M%U zCh{Qi+VQ&i^BmTYoj^Sl=&z*b&`f#`|Hm1md*6;%A*6k8<2h{`y6ut~*6q(Qv%I=} zpwgRRy+1I+ICxIU4cX~rPR1h5=S@nbC3%@)-OMnK#%5+1g=Bs`BSMauUmk)CRcF3ly4Xf2!|J-?pcrXXTlzv%_4DoezYn?)^hll`VXYqxmS_CgaUhSCxYf*TIH zn6TcB7nXWrPI^$}U{U2>+hIRrn+2<86N~+u^r*=%83!~d+>lji?t<~6t>1ViKGgA- zi2nRR#@bY)M{UBG{F=UY6FS^X{p6?`h+a?Y zOW)(yLWq}b#0G0riD>NE3!@)mSo}mMK8)`dyuM5S;=}k;)<3oSC|(o& zt5_z3?n^vut=*5}t@Rw^g*c2{wKs?n4M;p03J9_F!ABghzmD)z@%qZleD}ObQE;$6 zUGlN#$MfdLkb(Hih#QYC{fw3%t0HP*uQ5oMzWX@7zt}07j;^aLH5oI4yh+pNeiHvN zSbp{sJegkA1%2_o#H@Q@A71+?`@iX9dtkG^qAz}cqT`-Zl3~0A$~V!N{5thNBuSAa zvpMb*|02F;!M(CnmPj`$^{HQA^INSi{vtk3(7fvxRQ}2rwB4mQe-R%n(79idZ`7CZ z^Tgo3@Jq}lpXk4S$=S6}Iu7_({4fh2$d)bftAlS7SmA9)>D|7H|Ebsy@6%|{#4`02 zMy_x4%CDFf-)Q^am{}j}`0>Bl39@d{&u-=X>rEZM|Auo3@V&Dfl};`~AX*ZCqgYj3+;0-$5rH?#^;rvqDwh3+Sr{ocYI2wx;lO^9|$tRRXNu;#cEikVIheV^qPIC**8~0j>*Mb{1 zZ{b6t#S(HgV%{Q$L?a@0l)A--MDq!>bTU6n4vFT8236`t(cguym%62g;KmHoD0S@# zE*Ijuc2g<)W9D1Y1T%3tw_PfINyNreKEK!vnkFnt7IPUx5&y~#=Ozl-eiKaheOp*m zA=ohsHIWhWWypx%KO?GY_1cgsBu>X)L&_=R{*F_P4~pzav!4~!1$kJ(siumA;49B- znjJRTWwSl10gRvLt6hO$X1=N!Qvy%&PVSd*IR*M_*hRH&DwVx}u&Bz!6bg%~Ol+j6 zjo*0K-Guzdi+&?t)m2ORWNZZC0a;wbvfL*s5+yoXpyJgfiSVAm%|U7v{|&$Xm_D#T zIVp(u(F~YXOY%HMEs6S@q7~RnseL5DL+&Y5@iAS*RO4Di2vnv&u1HOU!;_1t7+?tF&4F)Tjby% zmZ)lq@GJrsMG1QXyfxyHt#qk~K0m{4m?j97{(F(CO5uvRExZgK4R0=mkMQg(ZK`qu 
z#nfEd#56{)@}`@Rl!RI?Cr&ew_<*pbUwmU?k5hVjj8GIjn_~bQb_ueaQ($f^nJO~$ zWxC{*m>p|!q(p11sfH5sO$p4nrir#4G_fGo)F=h!$C~OW&~6G8io+-{b;hzr@>;n$ zAnYzLo75y||0Xu6se!0na+SH8cXm@;DtS28l#pk8tVw~!LeNMI3|X{l@N*2}b2+K0 z2|N-(rh@MV)$q%b^b!ZS|FfA!^i+(ho>7_;%a!}?hte;X3Xy}RDEWN+BMZe zh-v(O6A_t&a7tLAy)S1rgCOZMij)Kzm+OMyl}U08QbA$t~&74B&{Mz%ej; z8Tca34OehSsaQTW70H?!PLvxMiC7&TLtmJIRc3_6NNK+>thtkFpeOk&XD0YlV*f&F z>q^(wY3rXwRYNVZAL9W%pu2dBSa57+1x;-dnB;PBkj!5ho+Y4xU_o{^tWm98wGKZg znl+1pIRld9))YY|L)J$um*z4zCE|*=o(O=gCbQJ(Qwd!rN&4ALB<#5b(#X72*|x1D zDh$QVXJ)$TSlFOOY9o$&8HUA;SF;QEl_}4))k4uF?rLiS(cMf8CLdUWU`u0ijkP`1 z^k~U;ZE+J=XPH@SIU-cnG%!(56l!Rs9&&MgY8k+7Q@|pNT6dX#vQ*XovPa%MFgFO8 z#KY!qm-$PuY$nC!EF-L|Z1nVBRwVrDfQ4?#TX+%hiW5x|yGi`CVO@nrpwgpa>VOg5 zV6Gh75Nu zE{H)vTMgE)7pl5n%8(X~32ifD4IB%S zlsrgMUI@v}K#9D|vzL*5kI~0Twp`D*RkEgyJ8w**00*iX^b#YIA|FL zCrNe!=a$Np367;A3FRNv)n)30gKS?mvt`w_d}&IMMnHceJBq71^g(`+`Z(oB=u>0= z7fO?ANYU+OYH(f8D&`!-C0G}Yn8Se)S^FdUlQM6y$*hVD6LSSXWBecD{~-U%GA_%Q zuQITlM}+rEcx?J%7@pf>LcJr#nUewD9C3R|^rwrg^RQEJMf5Y4e5k8iUIy&7Q)q)= zuuEG?MAx(PUwR zjFkaMu5(})0NIGd&@SBa6qOLaPzG1L@VB}2NG5Ed=RHU4z@Uf>u|$MGuEApcW`(NP zFI1=oE*^fM0zv=l3KbpI;?-$Hr+mLYx0OV3uJe5D= znvv8BWV~^JmC-k{o&;Az*3(y4sezGB8Guf~q^_<~hb5Q6XoL?lJxyS}Rk`n2B-UT8 zr&Ot3BJEO6uCs~i$EwtX{TJotV;3B(wZ@BCM?^*QimW zBIyZ;pebvfh}+c0B`R+I*-4bbNoKY|WuEx0*VU-%V>{*K%YdwEE^>n5*UU_BX=j#o zBajL|VZH5=pDf$v#EcQ%J;A-eQ^>f-1@!J)t45@_cm>c!#FcwoAOiCjB{IaG++|2G zY*v-ziYpibA^A-lT9j$UMJj%Aub@{+zv6SF=}mp%;9l|8Xq1sa|K`42xX`z|sT%!l znJPeMEvi$)j}UM~qjU#u#aiO&6ii;18>^bCcyi5DlMy9~jEqQc-7VxtTol9!Wy#HC z!orHUEA`;QYW>(yW$zLJD`J%sn?_&QA7t)0OOJ6f;uZ+siMZtxqL%L6O{qPMXhr4H zy2;sGJlzvf zLPl{8%N5&+d84iCopii zgmLe<+SCtL%YXvc!v?z9MHn(TR4i|j`iM!rp-3Dz#_$$QiHIK!)Vo_>kfYRoUo*Fx z21Nbn27}pej};|SoG&e=C^R-A@MuvomQ#U;x&9S!U7e}G`$h$nmkPb>+DxPN?IJmF zKO0lWpq~VxDeoWDC!t?0tQC?V`)9}>ev0HbsXa?Bl zMOb!twnpUC4L`P^xPmBlwj+`QowA5^2+o#ipgd7!S9vwJN0QYHS|Cv^v$$3trBqc> zb0j&yES~ywrIM*+#4uO6wB-XM3ayGHF?LC$#h8?;gTUkx0q8C)u?ZiQCtWbhKujdS zm^;Xj#t-L;q;sKe<M0>$^mF;0<1D!J%F@5t& 
zu8#;|3od)o{mg=d$xnKh?AK}5lm7zF>K(wzopn2eb5lP!y^?Qb;OyNV&b%GL3A_F; z;H)wv9PCfQ9tj$XYvHfkmjPT@It<81FqhkHhv3ZyaFNi2cnr}VwS5nKOjBD<#w`Sw zGSregBL7XN1ch?dUO#N-0^U-bI}1IGPcKeWMsx9YaBPp0HnG7K$7eu0<#TBxhQJg9yAeQ{_l*r@_#e#Ts-Kbu>ZKX73Yq-hrR_KtUl9_ zai4$G_Q3j-3oddp?qJ-v$9Edtqkb6O--C+1exQ_*AKEir^>~O+EqZA}H4c~Fv~S-T zB+2h-?uV8{gbsMD$T-#g$jqo1X*oyk@}_Q#R@K~+jJPP|(^U*3mUX&#L?A4t0o#AF zC<1;Ic_6}l87wm?tEafXNnEBP`|8E!Vnsv}lON5)%WrMkVfNl)Q3CsJ}ta zk=M5rr&pyJCArhnAdC@cL4wY*2XWW*w;Hm08~*x|NO{1pH6w_m>t^& zV7Y@tXl3D+I(@~*+}4hn0QB(L$X7Dc#hA{jDF!3dgpAfNv5&oL$dD?1$PmEouW63E ze~GkZRyw(}j7@~0S*=D$R}_4?SNMCV^@pYuxAy)QrLbcB7p1T={}-juSN@AqSW&*G z)DO*FBh)iUTo5jV5}cQjkFcgSfh%Sf;&}vXk1>+cgt=Wjfsy>AwNDNs**q*mT zt9R0t#fZCm($*YyQ@P2u&?*dwHiOGF)=H#8n!siEG%0PaJ4zSw>=|yhH$ zk^bg+Ob9UwHS4)~>8uP_j&y)<2e+|kWeCoGNBs&uLml?%9mLvY&6 zIHko(@Jk9^@Fky?aI)M?xbm#ZJRP1a2190CGm?zdA(tJEkERezM+6q}OC;1*(ci?R zC4=U(-?`gWCy=zF0nVpBx6Kq0n6gFMcUYt?EF8O23UyKlkolW3<=$cDdr5%`@NN%WkchZdqEn?qFcn9EDyfo+4PLWphKWK>#;*l*vwj zp$EC!KvdmUMpjf^fxW#zJ~7b>Sj&|$a$r#!$O;-^*HqM^^m-N1>xL_H!M7}1vc=}p zj%Njt)@)gO2WCwr>1vRn1|$X^O~u*Tywxwa8rFIU+Q7K|mOYme7#bWd2VrVOE`atr z3!3Q9h`_1>6HNm~j75YC?*u?BBCGZ7BUICmmK7MbY~VEpk+lVBg9z1R=%00bBWM!| zW`m$lt~C&eWdqpy4puf2FQSrNRJ=s4F)-I<;Yp(R*aeMyTM@_^e1_PJ zER9K&R&d95Q-0Gx2BZjUG{eM58#|lcYoyw_#;{vMGSWhjQB4eB^z_z|O6fTxRq1i0 z6Ma#>Kq*M3WaE-V`!R@Gs%Kk3Tz+V#JHKg=G(w`7;*o=rVlLpEjhXKuG0uAF3Ew3q zI!cw{B=7I!F}{gG$g|SjURp_a8jB9CY$Ax97{`$f! 
z_k@dOI5Xrx(rkttA(aLagJfq+8a`z*oMgICL5oiXTxQ}6h%EaHjVHM(`OJi)=q|ywc*Hq@EbGH*?CYR2b9V03- zSkA+cDlPjYPe*Cwdwf`Avyri&Qg4Or?}XEmxeR6}nbC|3F!$woGkSV{c1CZeO@BsT zrrq6C)99`E76<)p5i_H&$WPDcoZ&T>>_m~hKZwZeovr8Yh96d|eq=YivWDoTRVtys z*$wxl*_rJS#uEGWyxez;#0xU-(w|*cay>v|xTB08Af;ii>XwS$U}e0X=1Rbu_mOy|lafIRPa{?x7CA0ql%D)F>Xe z?V*k>@8Nu|zn~ZZH_YqE-&5Uc6%9<_M;tnr$cgt4tWh1yN2_02!S(m(o%U9vf*bGA zC+)3PMQ;lQL*5C@p1`Sk?6^YQ)~45$jIFXS5_=lwE0cNldNvv)#PcYL=NgizEQRt} zhzH-s+|P&dSx)6!dS|!r7kc^hy7JU3U*NM{l@5JjMw@EwmwgXz&eZj<`yNx@_hW1ylNj$~<)t%7Y^aIrR2{*ordvc?@%aVXZ4h6;SP2G)=U(~=~aGoM>8a^T}CL8b^Nh!t}6aEls-Y*Z0Xp65G;~4Uh`&N z4n?XMB@Wg@HB$0Z9I55}T(Wp#-dN!X#pA*!%0lz!alvE7 zmr@E#L}u2Ag)A~eHihUpZEB#vE9+ZnMy9<$W1{AYl!pF7ka^~b2^a^4qB}3h$eK(ZX|O%@Eb4xCs(=3CQbh z_v8sbJ&C@9BW~}ISbnGQ9VUK0Z*13bccwV)pTz@rhco2czSe2H(f=gohU3Nr&i390 znLUm3yqAIInVq;CTnrWmj|gmteOYPnmFpiICBOT=9dUJB#Igc2IP)cP3e!8A^Dt-7mmkk z@yd?7#;X}YeA{*o$tCs=Q2b(s2&H}xZR>EQdN|K=F^|GNMCoBl_#{SRK6 zh)7p4)8zIXvrK(4*pLZfL<7@*z4#!Nq;;>4nm#smJaf2;Sh0uF9Yykx4!(YbYD^~z z>obXd$3`EiP7e-V{C@|2)V9E9K=a4V^kjmWw=$mR==OuufNkJ?cA|<|6_4b=D=wOo zfcfpCRa5$dfjRxbhkL>?YLQjYfwyxasE2JRJ1_XkU-X&Bsyfx_1t5f!iIyu1fy^Eg z_|xtEi@x(%_3F3>?woGh*H#r*`aBNiUhMwWPn?}yjUeQFT;cP5LYBb0=$?5($Il$6 zP8=gNo9*`M06jO;O8VR2({a^z|v}RRiR?_*6B$x_KJ6K#okWwjvqlOC+nq zPg4V{ACzySv)|Zh_rEph`KPI(;0Mz>?mZ0=Cb)Es{@`@AXYji|QW95tX}zX@=zwSfs|vqM{Ig2CbPECdQizJTrsn#$iMWWNaR8BGehp>FM9~9INKr=pl@$hm8lEN zs2BNRPl^nYV>B|rNbq^%_FUu#MBKizyMSV(Ma24yT`*5;k>yz#e5nt?+qRsLe{2VCKX|A+b_q7fZYgVRc2`J%B!f zS+pg0mx0r&cY?7%@pEOmSox(9o63@MpDvrmfuD8yplOJS zEA{i!)ZnsJ<#;X9puizbbaZ?(O`U9659;4s1}l9;-*%a*=ElcIFH=KdB!8DjyAEHj zPUUg#ERx%x2|M-IG#1#7c@`wF#xqy-P;MAf)} z-KmGSsKF=PU14PHXduX1w3Ofr43xOkd!girKVd$jD+}T!LLdj{INg%SKh=ClXAoVQ z)3o4C@*t_)-J;H+O?4&u?mc?RmFibK)K%&NP%-B!*6sWBfUDJ&#MZm-YIP?4Q8QqS z?fN(7(Xnuby2G;W?fB6(sv>AD(!Xehr7Y4Hw=%cy*K=E8sSoOI68BwHk&eDEF(c&Y zkcsZW-yUo~+kKUDe-lY5H;Nv|_H)>YGY5GT6{ib{df=%u^ z(7ISZr4g;C={L1XjL7+H*nVp^JETxx*e!EU$ok20y~|uRH274x{^?wGoP-R0V6J+l 
zc=&KEERY>GmVldLU(5T=Auke2^DQe7umWO|qe4qhUw{xcMSrzGso=K-y7o?W9uX$5 zxf8(O&`;i}4zupng?FLYEYZX6LVVg!Pr6HuE_+HMVj^zIE*}f(L)=q!mwH)5rj~{3 z>Y62AW1x60kd}*tClHnTM!i#;8W@_swM8G@ruL<-nQf{jr!M4OQ`aMHUR^WpR#((q zzLmNVT~7CAl-voDm#J&D)OC-VIBHozaBTWtA_8j<33M#!5?-R>a1MiGigj7R&vjec z6O+g5rT3^KRenPfv)~d=lemj!b5{7-SFU3HZb-svIxYRl+S+RUYMa`XFghjsDvs935S(cX?d> zMDP75-{N1Q7wDZ$D4wVv|=Z%clcNxE=BvbT5qY?<1}qSou5Q^#`wP641--lOuX1}05@CAhHkuRuXt3&Vm zg8FUrDfuEd0GJByeF4tar{8@+4GvOYtXmCMoZ(?<9~wN%7yt9qm$9MV-=vS}R>Kme zk}&~e0t~o0fJuLO*Ee^oA;GqGy{uau8l2y*OP8y$!OnI)ak)Aq*xs&hT&@lz_`|Eq zvFbmpzg(^w>$aq$SBV&5rnDJh`vbkp3UyIUI;t@$vk39?Si%{FmHO!wsO)(RJSg@+h(*SK%KgrOLVGPZr@(1M%8X9 z^Q@Y_Cg%PX8v;g#m3qV~=D~FRKdY1~{dOZos(@`S%lN0dWfdIii;f?yMj&HxeRK^7 z{a43lYt*4uF{EpKCl8{S6;k7cIe12&QL9$oUHIw*K!hrVDPeE;?iy=Wb__33k3>|uex zUIA>7?l(8+buX%VP$Fz>I@O7Fwppr0Wr@D1#xWfyzNki9FX$^?RQv9z^dWHAx|huS z-RC8B4AXeVOX|kp6J^>R=#0VabMZiDBzFld8tCj=qHG4nQk(aThHo4)B~b_{YS*e?2kpvin>UGzUn+)Fb@4>WU> zHRWp^e?=Ws@WmC_j#~BaUQwT0w{`sNuh4FAV=Y<#|CZB$lF`4 z7rd(WPH}cp#wa4qhwXLfAaYWBlz0{H?Y1!rP12yR6=82@|4FizkX2NR0b>F`r3mM( zC-NvZobON(8nBF~M9AdICZkgZU1NU=8+ic6vPE>qy$Kdo;!{VN*qA&G)TqE-!4i9x zIXZq8N7;;)D96}YdA{b#s7dw;G*|&w_9!@QPU|9O5T-2+FgA)L{zdj9QQ`b-RYF4c zqfvGXB(=A=DOE^)GptEsu}3kJ2ebi28rtB<(wM-l@-B<`n7}RaQz{=5U}~GMH_OwU z;h1Yuh&(5m3bEpkL09FYXyyWSGfiWtN3C5z2vj;NA$U^0RxQ1lPcr-NQU-AwJO`0l zrt{0-`zoW+F0qob*$(SV>*FH=ouUyJxoS+HPs-wnXmm>nZ71)AV*))qMTKp*4&b^J ze&^kX*OSTIj7?9V>jJv>7=05$Ge_xW``Mtl84E#+7z|n|ASMEW3ds>ykcNwqiQ0gT z4TYZ1*yo`TSf5eI?G+Maj$FjAOsoC#O(pa%mF{5J_BzPbBWc`#8c*eKlbcLHihwF3 zXpn_#fIs@3fJVecEE7Ixgq6sEwk6wWbI1pc-4e--BJ)X+4_k9cKe16A#9as9ZB#oQ zUWeyPC{br*2#JwmPF1;csX#^rB=Acn3_r1dxWcXMkA3ZYt^}YkUI&o*Oq^Px6_rI8 zAB~kFsLSrot4`jyQ z5GOl|MDECAek3b1Go+Ry7t9c2Zs-i9$kRn>IUI_YQz1ui8Tjr{(hoNww8bVDG`TE9 z3>ltXp{6`H07oZqeF(kV3`h?EmgWGMFcyw}F!rWlBp>kmVAl7B*xN1=MaO`Mvw#^? 
zJ7Mc5-avIDNYxvvCPZ)*z2yxxFeL#N1x_Lz04Pxv1%N=JSO7I{fx%Oamy6*Qn?h`r z1M`QWWK9mVMDGJuY07SffI0siN>-U_s-&7yu|jjSnSxuGI^yJ!CrcO_&Z+w5zpLU& z-idVt59Tt;JG_eQg*M)sRMwK0jdWmOq8g!cxfi6Y}ll2es2YV}Erfc&960eO=Y*y4eVKWd178uP5hU4S|zK3Ogf<(DkNe+nXD%P!$6wl$gz;A8NFg?%3q&wN8tdY z)P@HU2o0nd9Ent2gNg5oh>eu|+qKN&Ks z(Bpo>E%_+hvUWY`C(O%-J|$rZ32s+DtFcT{ev5aPw^l97&o!Y+Cj?(z`ALx1ogW;F zAH02(;9iVaI1jUFf_q9SU2L4(6cUzU2tJAnWWx4V*;0S8l^%U8E%W?9p5LY9EaWDW zL_}zw#@jf_QT-ly!H@HOd3wG0NPc?of5=bclI4Rh_B0Rt&G|`*9TP}YnmO)+-^cQf zKNg6`ZMV{N8fH#7jGNw9u>)Fg`SjSlc%3owbc_lN7dC(F8)4K?UE#KMj|j{!)EB*l z4&GU4X3XL3GQ8qL3N4E7Diq~SJ=a1AF>_`ES!9;3mnWR3M~d{CBI3uKDWboAOVy<$ z4gsW6m?l5Ycn=HaC0Vd!vIyyQ;S4%IJ8@*LaPpan!(i1dgeE+DOP#T|;49$STR@UG zUCdNtZb=WBEySi4=bE&&LSO&3QYG!>#CkCdqOVL_N7YmwWu}QG4BgC)B$}Bqb^B~3 zJ9&!St#nGpNrWBfMglS5Qft`779Rzhb>lm#I<=`xq&JM*IlZNW;_XVvhBRj&A_EgN zB%C3Cgm|GbI8a!q-QYu5fC&dJZ|vlD3ud~@C%Kh)A5n$DbC={17bK6kA?+TrrQ9C! zjWYSfDO{`@xp_9Aq?$q8KsHOVAz_s#>&8j=;Mnoe2-StmiaS>tm4QG$;&0tTyWS8G z=#^Y-&j>=92qwf@5lly*mpzQ{1^>M-F&9;)ZBmjm)0K>u6RBph`u#E8`A*7>%9azZ zME7@8X7C09TROM+UlhtXN0~}H%LyCian23zDwS$x`S2!=I4y7T2juQb4OJ`#W@>b{lZcb;del`i!%899FgMtul2Q6Meq_8AE1 zMy9hy`b)8E!_*RIAj59pW1bXX4j=mhv502Uo3uc@#ndH&mfOsM(@c#$?B&(66A;q*9y`U&*fvQbrc-x9ln3#Fn z)XBMgz|7X!m#!21 zZfJ=<;REHc;(}1VfUXjeNrVKL9q#lJSrn4CzTpElgj?9!+#-7tMqW2@*%%xN+JcB6 z(|}WC$(U|^pG8F?d-#}@18blmwjCOJqS@1$-dq_G=$6ndIF%%dVSv=ettakQm>(`c zs1+9ZHxm zIe!FhGk^dl+na(dVR^fNqAe^iHavwH=%rq>;gNuHXlbPG!!jNcASNR>$_NPuLof^xA4WE;+|)U)I8c_IdVaD7zi?u^O=v)9qR_lj z00Urd*db1cd_ljFa~j-G0m@5dQTD4yctulw*>a3@P37KhIDuo5Q4B;gWI9T)IDC~w zX$_r{C_kGoc|_bGrdoBHMyJP;Wz^14+4GD(4YnI$yu?@Z2;Xg?5}7{|lE%<&b1Ye9 zNF^Z?k#U*f)lud?W%d}ebrJ7Y?UI_ytVl4bv*iiLXq8z)kU67KNO(FfQA{f*EPnl( zBN~(62w~#5o?u%rIk(}1*-1GMNUFzYK4#x+h-S&bv80gfc15F^q7nm$BJ&Af$SJLg zWHimwvt{%dpTIh{M3d0M(BRf67?7KixfogmnkYl&)!@Cs>Z|%H4;F*H9f?Zk3n8$3jyiOnFtxrlq-?ZATN%)-XlaS;nI-Pj8OuG z248T-Hz?Km9B7HShmR%aDzLN72pjJXuMJZxEqUYHi91?E#&y{?rBk^Ck}WPVy$qnj z{?HTfFBamXN%1xrX|3XN1Aq{(-F_ry)=L3^s9!Oox|xBHfO^anuD4()GLeg=^zeSg 
ziDc|qETjTP1bGmjbAwwFw&>kIRiha(kDN2minP5EJA}jlBeIx3$3s0FXvVBUyUaxp zYC~*%qZ`B=v*%zoeoHbl+Pz}L_y_6>G3Fxf9@6z5gMeN#UdDQwCLYEqsyBVA{z$Z^ zV?I;+!zx+3jQlH6r|d=vOlxH|L-OmzpQ-BI`lJPPUo$@>h+PQ__aZ0^gYZQA>&#pc zCBTS~efq1ZFCTMUMkol;l(_L1!DQf52(tB_Ap27x8WBm2 z%3Y--}<>Kn6yLDR%imjJg4E8-@jF=Yd=kth{XtjS*(d_tC+)sBG@=(-A$>*q55EU)`dH7ZL@6y|N-=Q_J;t->MVwLOAg|bq9wu>}j)|I5*ba;5b9AeQr3!`E_Jfz+DXjM+A<$;Xx z@|gr|8lf%9`}ogPh8I)+l?5ezVlsJNRU&K6q|yBN$YYwe}uDh&E*UUA_Gq4Yy=yB-QBGKe)4@~&OSJ> zo;3_$r|P5kcd9sn_P{X5(HHLGoTfi7cLo4MO@*V_PTjM@X%t5<4jJh;mk`q5D&GdoBCJ*Xs{zot<|y z97F4z7y0~ko%278#DNEv@1*~3I~9zCt_S7QIy&l|{2(LnWE|8_56{*|m4UF!^jGqJ zS@@2ep{OEcms9PHN0h~&-*_xdkH@3RjK|*<L)TWubXCO!3VvWwA9k?FV=`$Lg1&m=u!jY2^hb5f-gHM)sv?QD> ztl*oQby?EcwcuJgeHYQEuc#dny7`M1J!)C)Or1Z#8L`71cT4NB1D$>QZMgpyva$y` zzhVR?4>BWg{~%}QocYnfZwR`|5|sw+=5UISd3#Q|o-(j* z_y36a{g^hCyUa6zQA;PWtkl{eEreEfOcYln?5kjB}F1r-#{y zP9F%_Bp$1r$YQ@g&z>G;6FN$`sDD-_W7o-3-#*S626tRJ4vuzN7(Lxil-X}pCd=o` zQ-3+m*$qk_c91iCUs<;xCnP<73=557!$Gd_Vev{v?6)gw#!<`9$Fz;~ZloTh|f+FZ8PEE>u#Xw=iIr(K7?@64ReF0BmnPd{v4zGD4M2cTL zgJRh)m*LCKv4X%MX2HrIPi*Wq&xIXlb=kJczC*5w{Jg^&?2kHHj$rMB)hs^JQME?e z@TzCw6F0Yf``d!1VtwYpb+!82BM~Yd&*mQ0e6B_kQe* zDdgfuWZO=CTP4W{9_0+JWGe^ciWm~aIw_V?B#}QKR`UBA!d*`hAzCb!8zYhbB7XF2e+oFIgt7X17AW*Z00vrcsC@{%}S z>va`X6?*=O2$1jSB_|^C{bg+EEIs$<&g5X_Y5KOA1q1auH#DeZMLbyS9eHFA^;ai3 zCl5O%5gKVF0N{&&`yR}lv(XLQ@CmDyFj8DmiVwD&XxyYfI?1U?oyAW%JsdKpm~an5 zc{7ewA)#Srkm%nbb3`a4u7;TuanK66xM!tPpurI5mvHE!%{dkx1tD=$GMkI+Y!3@w zjI)U)cwm57EO0)XOpFV0U^CtToG1c!ql2sDS-MwnnLHUxPGxt=Pj;VMr5COABE-I7 zNY-ioatO(ODB|Vq#JrU4s(q0JUNa@}h+2gO^e-J z(!~)ALB@Wf+<@)QLjwgLg8te3#QRKsOLn}h1VyN!JO!O%wQH6aoNJ>9f}^AKUnd)kMZyEGQm; z>46FxRJLIio+{vxJyW~EG64n5#jwy^rh30_HYTN6HiM)mvW1k!VwY(#At-mdsQTIp zuj+2e-<_#PwkExt>`a><15FVeu>6iBLPwZxKm$^ALrz`2lF*yUDUK*XlBtaXjSLDq^ioM59l$^+dsFFjE>y}v8CE+6pIikPRj4Y2!jLc# z{^eYUh*f5acvUvbw8D{!PD_1w3*qSGk*_?hp@XI27U7y~f^b2$Y#a%qtPmVEX2DxJy3ZVCB}5js%!4 z&@JiRq!TYiu5L=F_egpV>9{I*)8DIy*gA}`Ur&aBu|`WeUQ})a`604jrz_edAMm-brb2^=%hCFh%z;Z0-{$0mt8lOaxX?}RwPa9M)b 
zi7~UK5HobUj9g}(B-z=*BMTxnlcq~b<5tDr zPWn5AxgvXC8A3Ca63kCLsmOzCKzcSyBV>kW;ss@LY>=l6TaxT}LxmFmQv@5`=zq@C z>8&RlOrzok^$_M>@s$QG;t~py3gp42xtUJPpDcJ{Q&XNlks!RHReCcdB*) zstn4D%oeP{c{m7z$Ylv)6mBli9lvvug)Q>ZQlNXm^)bYuWAqCqxv=44N`>A1yj97g zWPApo2R<{lMhOw5&X+_lB%Lfpa=67H!0Wj_>rYN~s!j5=QP3X?uex?gZ6_7Dv*_uR zRKmweUa)xJWcNUk%=}@2&vcQYAHRVOj>GbK-{lnSBFdXtxyYCqt(0jtKT`{oAdPg> zNJQ2fpZoSmB9bqw8y=T#4?8Rc%1P*Wf{*8ArPM1-;r1p}6; z#6xQ{+mVKY<8&^7dJTBrwjjE=TT(;^$m0W+<#gZs>vilg|$4Gz&tPe+g~<(Wy$cOOyD(8_XHl1PSI`r8I$1nIS7_LU?#5pivE9 zJ$!XZ6IfXOr zdy|RCvurioJZjrAW_{t(}zFu#Qhe;t>hbEj&HTz_G(JMK7 z$;lKUr+@Ivo8k;dq1c9=9vm#mXcNaza?VQqueAw$iq1#|%+77JhUz62!gL`#LjN-z z3}^qp>l$Nx_2d7fY{ZPRp|8EGygCKj=%-h3i`=$)MXspxwOpMZvQN+LNmk2vIFget zW9+FL%r7Au(XN`Gr93Ww>3pdgPbQ*`ei{g1q&1MV0+I{H7Xn5wTTM09XVsA`7^Djr z{q&Nrq-+`leZ*84jQtdmCFPl~JJd)5zD7bhqmjgN zH4-@sBujTbb0DLVe3%PdXmXUOuE2wWVs{p$+p02!vh${IyE3e42^Td8ssFhnvjreS`!jD{@~4B>?b}PX_d%r%4tD9(?YMbkk*b& z3kV87q#sFZn}BxNbGr0QXXio7Gca>=pWk3cTl)GJeek0 zjcJA6@80rbCeAn6a~7qKxzrrao1w1w4)ml9SCn1otl&P${r48V=q#sUuQq{HG^H7< z2zykMt*v2)dQ;mgUM@XqQF&EyMs2#LSv!-RhF!a)Hc^)rz0aO`ZMHUZp>Ddj-I7a` zr(pQ*1P;)Ag_J1MG~hH1|pUGUz8~KlESm6*V!t^zi%3_pp}hk>@yrhpxX!W(uQL=}+!2Z#Yb}D5E~zeiJlJXwCFgjfRLFkSG>W*jJ>)$+Rhg zxiw3~8AWQ9)=lR+1B%(u`tt3(lA+9*+?k0De`#;g+6kzrE^A96=x zM(9{Gr$3uA1~X0w_tUMG=$>zWKvMyVnpQ}u#rbU9}+6}cy?Vr3aO zP5RoEuWI-{uLL7T0n6%IQ7X?UTaW9>=Q)F7{Zy^xg?deUd1H|0C;ro3T<+?h{mvK4VnGBn>ouLB#_W4fb-We^e<$Av65O;~_Y?1z2Nk(VW_v&5$ zh=FioKUf`(WjI#E@W_=dL?6jg*;45l+ z?gdU_bVfP5XKsHI1+;Ax*<8k;F;-a zOeN4&Mp=9D3jON~ovMm{x|&(x!p3Aq)WBOVbasmwtxTAkXl0vU(63$SG{m-8D0Gsz z1?{Tqg*g{tbjfI9|9SygBnQJmksEiYm)$8`CP!$o(9%Q!HCh@yGg?|8qotWpPH8R8 zGYrrpUrS3FbD&wyGFsZ38L7{Nr;@e&f1|Eti-`-9ueXJB^frt-MsMSpA&t~EY;ELr zMqKxe8i#t2W`-i4QYb?y*KJou#uL>qw@g~k%a@R~5|lfbqqAkyzSO@izr3Fu=6Bg> zfuN~V+UEHKZ3`nkp9?D~&hP}bh*52Q zT{0kpZGsG?mB!3^CfRbe*D2e~60iN)sU0hm|GJJw3vwDY(C%2ZT+^RviPI`Q6GhW( z;dqKB(0Jl&M$f!ixLIUS|28ser%Wm8@f6R5-l?Q6wnQ)%OIJpNSLn8Xp{rqV`7 z-!mn=k3QfMXAsfMPQJvMSpERh39!7#$hc0~uj%J6aheLRBRVo$j(Xi`1$%W2y3{#7 
zs8{AW@e;EEXW4&1V77Jmp{hNCD@*hN4^=hBUgP+`F%>5yEc1GO+Cx>u3yo?0P?ZWV zBHB28ovnACf(P1j`lTt(;Hut?z#^ObviBdd5wWM@FVd?=OvQ_1wmxR6b4bAkIc)#3 z{^ZfB2JiQ)x@)R)FDLtS>B_1-3ANSS0qS^Ny2PrN=f)-01mZL9I^Agsep#V+>9hvR z^YkYzC7<_ORb}4iby@=?eQBqc{`XF+zOE#Y;Gg|qCA-YUw!B#KKO6gFE_32}U5tZX zeRzGXKKn9fd~i`r|81sItJht|Cj1k+@N#f_yB={l4n@oK)t5UXgI7lNlFM-jx?R72 zxpN$;W3O-$lKPt~*e1VE-+hHCvi1t+Bx24CZ*hhde$APPk=7CV7uPt|`s@~Vq&xM4 zEzWOwR9}gcVyB*SrSmf$eOKb4^RTYG3Ri=#_1LTMhU(O}U*()YSe>t~a)tm=)78!( zr-PdkMp~7*e$5bE(Cn?-TXgJ*`ndJ2o^my{J)tKaQ78&%IBlvdMOhDhN@0w{)uyH2M->yGBtX?Yn={2TzmtKQY&F%W` zYjBNtSigCV^GnLP*E&Du(R!_OJn+7It+Ry|dRv`%beKC7cNuGP1Ku7hbkQu}0;#hA zVTwL#7LH(Z_0_xuzbV)E&%);b@0(f9pn^Ujlvi~1b^DWEu zOV>FE6A7pGdgrIqGWmLEc*UCM=h!2y0P7&$EtnwP)z9np>z%7Az6%iYO&oPO-66kh zw@Dv48+`WY^JY84gR5iG?rsGvCdCA!d3`omc}IUW8)t~+`U<>Dt=0Oj8=U>g|JDu8 z5d~MT2C(^h^c?Oazh|{RV~%qtpj2w-u2{<>If3|_AJNOTlZb73I4801;f^hu?fO_- zZr1sa=){dqRje~NvHg*bgKu=KAhF~p-0XD6=H`BEeMGByctc&Q|1i&a)_PO#e2cT# zdR@PHJAMJ1Z*hk6rRi2@I)(4L)w!R?akn{BoHxtG3uqWzB4`{m#50BM;=s5F-Yz%j z&9^z%G(Pkp!F$5?bcquuCn7sLp|Jp+5Tsm`9o3lFD{HS*Bc2uzEQN91&&PoYR7`ew;S;Ay_ ztS2#51_46w$=~Qv$xhCRZhq)9EfU#TZ;n`8%%oE z-Sq_PgYgqouU*$y>kSL)%aiyr-N&V^M)V7?`_H+O(6n)h%M;=0;s}8Ml|J+d9ArP& z)1H9t=IQn)poI7Ju1`6G_IW1;zbzaVswJy<%^BCfUdwYBA9ZRnj2}@Lri>$)T)!($ zLuSpHrw@D58M0HmTmf8Q-^WEe!t%th0!hhH4p<&X{%s5-7AIQ&9n)jO6_xs{r@$MFOw^zMR8{X7&YbGSE1{V*Y1P> z^{LM~5xGReGdzPAj^35oti8Z4&shHwRDB89EA=^RnPe;V&1;>8+P7orCK^iW zD%}^GW*pQBcJ!RFsII#n76#- zFooXgLAu$b?RCz%1ODMz>4$lKtPwm$dt$;Y^2~mpyf4%X*Ezd3yq4)dHWQ-#SjkdU zo(SCLE&cgA=fb)>W@#I%E$g5cqnOAP92=HdIOip2pWwP#dfiLTLCnIumz)cO>u2fI zOCE`u&);M|SFLx3gnN-r5T)mz|T5aISk955fOO-FL@VRcvjaz0>;%Nr1F7I|(g7AP7g;ws28pfTZr5;37{PlWseCG1?179n zTlU!lF|NOyyT{ch8oiUQR0FMW)r4<2Wcps$6Ok#*mGg|cLFG(<+`Q9O!+9bg1%?F< zIIM;3Z5Y^Czt^?P{~ClVIenk2fr)Q-?{hsGaY}s!FDG-QM za*r#$BCJM824!3nfGyLhzWyMi_qeLc9s6DTkh4Vx!00J?{DA9GWGVBY>mvk<4np3% zDt8`48GI@4IpoTyqA8yVL^n}aYxziz3oIcV5g;$@bqTrPkn4kRJKVcy&0Z`547L>|8hCV!SUgrxq2r8#~(hD~`PKlSKC* 
z^lHcyw3SP8UqzLxH6Z)_CEvq5x$iLQ@|uh+bj7<=0VCCoX6cf_{Q}{zg#J`MScn3c zEnhE$?e(WJ@`x)fZeUDmkndb77rH4J_vgtLM_?d7NM1kUT4UZQ=Uwxw>yUwZpMK2M z&luoMI_`=v&0iyA|J6bD0{nV7k4DJCW`1?eMS(JZFif-%Mz0B~El>T9MtM}$KH=(M zZI8i-r>5%)YTX2j&GBoYIr4XpL;%C?MJHSqDrxHp)YT!``lPFhIr_4E_oOS+l$Yh_ zCtYdg>>|14q^nxs`Y9;51M=!g*Zsyp+4Pjl#aHc5K@grV|8)ve(|kGql&glh>$2Q^ z%9WfnHaOMP!lzFyLR$eShur%wOXq1crS7el<%rV|^~cIDPP^jG9hc>X)2@t!VWj6*iXH@HT9-Qa%ZM|s_?n}% zx_+i#3auR6*bs&UV}`}~%4h_4UWeh2b55M>cLq54s~*Oxus^W!TqJw{;d(e} z*aj$_!w$AZu#oZO(_Ud@cuo+!7< zX6Ieg&7m7*;dxg^xqNs9r3wvtFXbQfydhIAxGDs$#O)1`oSR&5y$!W(*r3$dHW*I> zsi1`TwpwnufR)^ADgN!MPxCMggQzNopp}b|Zq63i@z{+BsW~|ZEXttNP%ex@GaQxN=0i&1!wi8#y3%r3o_ znFgOY3Bp>fcL{=MlQev&12*L@`5Vt>j0qP%~nV5UJKMHpK8EA2z1A#D}q3*bLS~x*?%_hPc%lZiC?Q)?PWVIq=fkKzBae{^;gV6pZHJ0nc>1nMvsd z&T16~U%4|???UrIis9%oiY+|o%fQgMg1hU(-@I-XE{yOon1GKW#l!;ML+kz=PScP* z_g4s6`QPx4Kh!x&Yg}-uKdT$HN-sfdvN#2R+$tX2n2)*Wf$E!X$xuPLA*I_cN)|LG zhG?dWzt`M@%I)88yy<(k{R~HbNQCihmF`hRS;aO@PIeY^&^H#z9v4aYK(0FLR< zrx3o%!VT6B;kIJ7@el~Zh}si^sVPu-06}M>+BZkGZ4g}{0~Luw=G+3cr$4N!)!cVJ zl!{P)b{TgTyz8K*L*?whTs3i*Eh_;&=bZba(PGqH!@BJ{cf_!s;=#K*_l*$XyfEbx zBA-vF{f!8D<2|in@e8hp@D~n&V5iAe$il~Q@UYZ4pFb~@v=xgABGCnCR9qP5A;d`IUV;$k zXWi2=ga*20Fr_+bIIXJD8ma?w5i&rlp&qQ-diT;PWIwep!#_wGCZqgHk8ou|Gavqh*MMa?d$57qD3J|gkwjyL1Y(vNnw*w)| zVkbgUpKi2bp?E`YDwJ<9P{Q*s+6`dnZE%npstaTpZZl>BUlNJ|3)r!d?_zHNeLQnhJan;AY$`wr?2dsw%;+0Qj;* zSLKw1@TLJOFRPN=qs`~9Xj?Mgtzd`8&smY(ix`H3q8)T`3AF zF;V=9S6 zP3HJf^^_3i#$74yar#|+m>Y73d#HY@80HSg(~K>Hm|%r4H(W)!cMMQZ<#i@btyNUw z^wW$R>RoJ@+ru1=U7;RhG^YzbSGg#si5}9NMu)j0fgFEYG3wOF!haMpN)sKsP9+hk zlRuO_n9M|IDm>zar*QprW0U$0-u)sxAo}|+>Jk37;9SZ*{JMGy!Ar7sQn41SbAJ|U zbdU+sN&Kn53)IL3uqDJh+{5m~cOzjEh$jpcWc&Oa_2j3YZs;cr6(K2A{oIQWs1GrK z5W4&-pE%*G=l575qOdfBF(8*C zR@h^SsRsSPIrqe1ITsDpC>@gURXAP+I`_y9vErd}yJHeKEyC#}cHp2gckY(UV?}x; zJKo?-%CDV>AWUTi2gYHakS7D8BScBZx&;L|O194P9_AIpoe zQ18A}0n^0SvR?&UdU$b+{H6lNrVVoRAW;KDaY#jxWG8h{vR{Tpo%C=xZ66 zAR5N!W9)_eZDQMz6s`xRgqRSZa9jGb2_h3JyP*lfRZbo9=iscSO~T}|1o3##`SH-$ 
ztlK5S6G8Ztteq$-vM3%&6pu3b@rf95`^zgs#9c9Se703^?uJc)F!zRYve{6PO1`Wl z#n%H#BBuuX9xMS_0Fbm%FKPh?Arg1RL3TSl4k<>d*D+EI6P3cUkONAD4&0iPku1^^ zPsHF`sQ7)~7V%rinC@er$Uey;*<3MR<|K>z!?L3B0>0hc2iM7i$s#G~mtvz4=wT^W z=fwh%7u(%^VV#U~iB=__Y+fgOxo-Lds^^2>J%bfrVyLw)21U>{L!y3FGO4>*!u$q+qz8NXXb2l3<{dd7t~ zdSx@31+7?23idDAayAmU5kQ`|V~Ilchdc|1`Y1C?hFYkev$Cdz$?L4_VTotWOX2d+ zEMc|&bqtCbg9W~U)(0HT4uI6+e>XY9b`Y(T(2Hygcf zk{*RpwBUny0pF*dkkAINa({|QtO!kzDtBmt zuxp~fEB>T9{7a)B;}iS*0t**fE?-Xe@bw;294A|)il&}IQVW^uoEqE64-TYJ9H5BO zMvneqjv&?gzCEv@t5-DgD{(;+m<(+7rusE<}I$r3vk*m=a}D(pOkl|AN&UiCK{iDZKt6^tmZz$c}UeI5ucDM|FI8{GG|5E*o>Xuvk*>_ktlLiI^-Ote6*)dD? zGl%Lkp>n}zA|?Gi`BDW1YH%^_%ORXpqdc(yKzYJN^?4kD{Y+R zj8p`-&O>RVJrs^=VME6oYt=YU^%<5vz8BX68#`!PQEYhb@18Axo+~Q8Fcn|>`XUM0 zQvz;jbR)p4*EvWBxrUR{Q^kd&TjV^xo)=Lv%}Mt>(WpZnqv287?r0@_ zm5tiHOp${&qa=Iz%pMAa6YetHl~F(zFy?K9CQ1+&qa zDk46#kP+|??OraUs){zAOQb?ZQ&Hb=D2;xuCngn-6{v46xwH;LN>B-#F`z?#)2Kvb zx@3ky{7uuvHGXI6iH(&5&xG*j4sX6HrSe$h!p}2GQge=eqXw3U`6AUbju-X%cpJ(G z&0CI&FU4CHhr6&6-mt^3d@rMxBTMs!_?vhe!S6J0Jojw#2H(OeCBt8C9V=pk=CDGe z!=&pAQ9V&TK-;DnLtg^ey)3f3d%`&RS2ZEz@GnGyC>9DL)ahAniCRP`&MEIM=tOIQ`-_p-?2}|;+>LN8TmoIYVht);A zCl~MioGUQa@#uq(&v_()K@(}@5j+nlG<2jfvp83(iSu*F-_GxXQaIe8C9mhF7*X;W z>Rc})3HqtVyaN*Fd?X%>Fg)6`Z;~w}e2+@9@a7~9HRdrT<~uPkp_t7R{N_XyTEQF~o;vQ8U)B%}>lY~^ z5L5h|*jT~HRul!rCAYur+@0rV=kXbrX>NbjNdo2S2?DNp(f5@bCqL?P(CKlXfzuz} zQ`r5paED+H1|lGGaDDKx%h(|*#2+N|F*K%qUJtMwxL#;Br9F)Mxk~C&&0r$O`H6~l zF3itPvS~x1p;FTBq99ZGIZssQ3urccoX*2{cboH+kmM7Z-1Cq&q~e1$MGemqrlGrM z9@EG}8l|NRU&TQ;?W37bTs~+lSwsRwiXlJez%b+ti_Q(_AdmYwYm}Ox;hq|`2!`ylF~tiK zEW3P6!_zdX44CCy(5SJ4#b<2w2GtUi4Vn0d6&kC2bonX6?qbJaa!qYf*T|6sPK-aUB23E^{jltsnJFsb{kNGSISJAq1hYQaj!=8-l^kK|u;hcT$Z%eg7c<4c z#7ogWYZ>O;4+dn|6}fREsE_lKcXS;QNFQet?-J9o72Tk&SOqK9KkJIs#z^mydRYGG z4eC@GRA0OfXZ`YxKVVTZvA%fG+!ieR95iEONCQ!`f*0!soUwzh*udE6=Wrh3QI3W( zXJl|(W+r~(rbDc*Xcp@4+^5Ynu-c4alfsHuwYZ1z9!C4Po_urz@nSWglo1_JfCM_{ zV+x>5R;o4VF8*aFE$bljM1{jJz}`IXQP~-n(!nHAFrn-EWycw{CmZV 
zSQI-eCCtKQu=$O}PADaZRZgg6!$0$yb!(0g;rlc_TX0Li8vfD_Fdm?tH}xR;bQDiLD^J z)kuh!*$;>;7E5Gv!D4CDOR!kZ^-{(1SZ~2%IoDei%h?uK=I6_kZ$S01LN<8_%Q{so zsVtFpeFRIycvy5mi8Ojs)DE&sMD~6YYEi2t{^VySG$W`^!lTG~##^GfocxH&`u;~$ z)_Xh(&3Lw)@u*&9axSF=m5!p!OWZr28Rf)0F7tT zPFTR2bRzB7A`u2hYAufrUYas3KOm&F0Y@-wb5CC{`?M1A`b>)rm92M)3g#^M%WNfj zM-APE`2{_Z7Q^m|TV+OTl<;xcp|yw)RtY#@_}9O+NPw5isfaYT$)&ADFZ1|OZ*w;ZBex+<*K%#6SOa>PlyMxj(_b53|wdAq$kAOkdxnIzN9gdEZhx!)bw?->yzSR zb7Q{z<4Mu)=49l^r?5p(oO0e%qO*A_T%P+ou7a%fw1_ZP%f?TO5yl$tPfv>r#$Cgu zRMzXZKmaWrPTIH!+#Cx}n7rwVZ{U6S?6XiyU7a9Z&p}TF6ZeMCi5BK3Qf56T;*+v& z{W5~rb@9voQhxKCxG(8C)`Z-IvQxzVi4SpOf{c7#v@?f7<@G$$7$)V^=f(Zz$5IwP zFRGeDqzrih&x53V`~~Qo220uJ1@V)SD;u{HkJF5|u$?F`-+9kJ+5BXa%x@=Lc4R#> zXOk@dV)1i%+44oGkfv>tufK>ccu7XSB&x>#xCxSl>0Zwu+MS2MIYU13l4vKk6d)>y zMw|A=J9M)g#kn8Ac2uw)=EHjmWYJ6Fo?4eMg>W$Dg}j!3SUGA(DIA2lm;D2h)8oPp zn5Lh0MAEFgY}-!Mke|Ga{Op5m;>+lgzKkdy|6L%B_F|R`R4JiossXlpF?&aDFSPvq+BI2*sM>Cjxqk8SE9XbSP(J&(}reO1|?1y!MMHf0ZGe zeWnbfk$alqoF|vQjvD%1o_ihJl%LD^9^w&b;9u<_GQ#%4P76t6cswnq^bnOoQ2E$R z3wHl3H}(+qV0eG6hiDi)iMfez&X@P~6cu1t{&Y_<^8Y$5$>;lsk8o7$PID5r(lAR@ z)Uy&`#k102%*Fq4LfYF`yzHBcOHD}I^@Oy%j0x%6;t451X1yZ@;CzhVyP}Q%c=#oe zZ@e9qB;R^hR7334cQMIslWX4*eIU_pl+bQ*L}u z^bk8^C|AKa19zZxKsUyQFKj^7Bmuc?r+o5X*kRZy|Mf4i%)AjI>%K3x8+*KgA7HQw z#dZl>74}|agMMNi_hRS*Snb8oYq4)H2KE=ldoet2RI(Qng->gLbmzU8BE1(=Bttx9 z?!^?{z86!Z_hL#mTnyMyhx+znU~q?h70#{^_Fhbmr}SP7pp*%*7jx1h(rXm&#bEm# zdof{U?Zx0;db61oE+gX5boc5lnb~q=7RH~?8V=`Q8H5n~5^ z$wH)79%l_Du|g6(BzN@}kKN*NHt;5ovxj7p0it!qQOZ*nBoU0#oX(+(fDk<@M!vBo zGG4A8AgV@O;N}AB6YGsSGV8R6letUcf}-JwN-bC+z`&^|bcOQl0D;q(>txhGO#I72 zlo4xrz0HeL_N6x6>`l5F_9k7=+c)WEVY9PjlP)Z~h8$5|G?wEAiqU~rxC%{@y(ftT z`QRYz;@jI$zsgmE(32;~=O;ts55fQTd$KvDv)1FX@ zY%oM*7HyCVAx6FsZ?R-x{4 zbUUVufS`N9wh0^_Dz(QvZpzG)0mDVh5-^>JLAe1#8LZoH2fsI548O;#?7w^~R5(|l zsv6$!;7E_hI(#qSUm*lju9AeXpvL>_vhgP(Em9{8+KNv{21|A_y4vPQapgedn(GHWb;D};AjEGO@~f9aoSEP 
zpi6Cb6z_FRm@jI-^51qhij&h*0Pm92`x#{>r?w>Y-imMkLT_Klb@QQ@sP%SM~7Vrry-~U3?t4am99!-k3C^*W{l)ChQC69d}s>m;?;%&!s2Yu+h*|w1hSF^2Pkf6E>OcPUEJ!cB7gf@*uU9RV-ws2evwy?Os(B|ihp+;p%@ zL~UVU^8)x~;1}})`yh7{AQHmX8bt1}>%dJ6wP#VX>u{ii?pBo)bO63x2W~^Cy$rSY zz=U9qsF}oGL#eF>wLO823Fvd+%KNPeBB?5N5CAQ;&v2gm45hU9dV4}HoggZEib}w~ zjfJe@jfEn5bkrLQLow9dX=7n3l;BiTO=PH>4z?rIW(0Z0#=_MYl|Gp&wXFboE=}(@ z*k9%1tJ}5}a^-tcRJIn7xYi+a_kp_&c2iLH3XlP%sNgB&t3vRKu|pj{wJ9%S{4yO6 zIy^=|dT3Yqe1i=HwVi|K6ks#{)pTz2$O|8tqu`)AJR46Hu)`_Bd31d?d z1@EU)c2`Gz(pDLER~gfl-Bp|EA}7VRyRw~X7S|zdr|84`@HlDA5Dg)#-!lU?Rs&;E zUZrfTitV#tWA)JtZDTcU25hW+)lD0#Q!}7;$ffk2t4*p>b74|djDShi2e~3cPH~Bl z@O^qaoa{u&tl{E0vwyg}?h@6TEDJ%Lnl$ZM?i>eGMEso$qVz|%8AP$Nl_aSvR?F<#Agb%^|7;L7 zjCuMW8AK`Me>aFyuu{xGd~c!U_cFwH1uiA)|0e-{n0$SXaFr>&xBH*jLruogsB~U$ zm)}3uHLS*ATY{J8(nM0MZD`1XRz(0QSxM}7VKbp^B~d;=Re>^Isy3WN{=4zgaJdDh zPGt(^jSA6#c+kOTyF}`@+AjG7cgzN*j8B#A()rtMmx{tmY?sc%b}5TCF*SX4e zN!~k8H0@91 z!GsBa3sC|kj#F(zrs8qkHe_O5s|i92+iKB(lxWCwo@{ej0O3I!G98sPWO^jqXU0R` zvXtG9(l02RrqaCGhD^o0(U1uyO!%7xLng1kY+p@O6t>wCIVokfq}CJQ#9tQ97qKmm zQg?>w8a!XqcsSip`GI>PgY0IXlFNA2c(*D%9f+&?u-&|bE+-V~} z->A)*q$))yz!IP1sNrX?G**Vzz`oKt*`kK1S8r&n-Bu}KTs+&c4CF}1gWi51Vp=c<7 zt0AhJW7o-!Uy7=-QB4ts?WhN93M=9!DO~ofDKZiUv7eS`fO(kN`zfVd>O(DRlZM^fsFh%13RZaN3rq%sn}*OJtx!> z!rLnkT3F+R_tUSS$~LBZw=5RFnGkEg`3CyHJbC^b@rqA)>msvm0K86)S}Izbr^m~} zrI06ok=K@rc=OzNS<#CfuJJPWgh=$mw&M6u{SmK74gOX;U+5z1 z7*-Ob*H(s=5}Fm15)OqrY-8lV466aWb9`l3R~+~^OKgm6 z(mf#1A2Lj#?6FD=PyGi@Okk>xRUtgP*oFiSV=Uh?%`|7-E}>L(?$U+xl?~)GC%>#I-yp4<&7oK zCU=JR4z&`3h{P!b3G?JQCS~OdA^x^wkyPU{zLzfpWDfpHdh#`YQ~5l5v0P<8@8k2r z#qx^%TyZU)hb@+k)@uASc&ZYl&hLyNi|}cvD$}}UfwBP#9Gpjx{OUXG=b(gsV%+Zw z5f>)^u;YT)A@0Bzhzpk)>ol$f;wo=p91a_Ri`r!*U}rKq5|uV%0h3YCT6c!F1~m92 zslujLz7eT%JYhZZa-4ZVB$BFG#`m%bL0*Ew6{tp8pbr5G+(}TBoVi|S|wZxYHPOBnGv&lQ~vU8GS{>uxDJ`B1G^-#+O6(n#JnuZ3W@L4dlFn?C`T@D)J{( z!CvfF|D@}(C7uI+BtucM&rcdNoN*uXyGXeRaV5PzKLN$~oiCju2|ArVTakJZi!lzV zi$oo}2(e;oN#W8H!mUH6Jj+jy*&vx&UVoau2@H(a47Ay%nYzxnWo&d&a-<#CxBzh@ 
z85buz7HHi2jQfF2*pf34SHgT=5}5y!9XK6qZcEZ>b=(dPW|IcmfQ(Aku`>`GdV*A3 ztP?r&nl9*rL?0q&h?iDHjn3PQG#4{AMmGCJ^Kp)G^O>_~`H>x06V()Wl3pGIWZNwo z*PU_0_rpNjUyj-$YU4sSceC!1P9D<<2n-4i2@MO6h>UW==meGs-6KtBG%q>1tt3@N z=HQSq2F#l)aw0_4pt34*${KJ73uk##MNT0N4(g>V(i}NaZr!Sz4C~CiE@k94{mj}j z$E=Yp?Pu1MdH5?i+o6ZQ*3F3bCN ziaJrZ8dvt?OIT3O*om|8+JZ9OwxBGgd}jm7&|UZ%R+2S$!FUGZTMItJHuAMyFpMdb zBX^00iN{C7_!7CkN$A5RUOsUTW{$>gSc9*Z3A@D%eD&>a80ww;Uhdlsqc~VQUf(S$ z(*}0blPG6|d3Lz%Nt8dVAP4V(O;4emzXuBCLRqj!D9^!^Bxwy91c!v89GSasujo$S z-pUv{Zp+(7_aB- z7jb6Zd^vYNa=T-h{Bb{gG+g^rrXLVB%>~Qkvj?!a`Sfo&5YNfSkE&_K_Tyu^H^S=~ z%pb90QkH`UbQ?N#K->kRQ0Y1-GR?y8WSfJcA;aMZMWSo!WSC4sWJin0wST4tCFu5dI$fR2UJCpv+uvSsG!hL;IoM-;#9E0K&;l^XAQ}mrbj)}+k-1xYd z#&G{}@jk<6f77`4e-jzReg2!c7xCWTeuEho7FJOwV9=~qP{~-XtUe){7{AJECq&;; z3ne;{;X-NJNvv20$-O594 zC76oUH(x_V;chut`*PDYZ$9>%=mi@7_$vH?);&r#`a?W)hxNw?fa>+f&}2F553vRA zeL1o}(Wb3T`6sZ6TyU z+3YVsUb!e*84#5(ifS_NqVP8!(BGfGhTr$A-vO`}33to3*8q89jVLF#{#(Q-upmNB@l!VnC~w{-*ukB3-drlh4(R8YQHR>iV}|kx2JL@-pRn zP+=AVV7e4uRn{p20|Sackk6GB8a`XLF`B#>X(~#+P1QenXK#uo+xIf8L}TRi%RaQr zN8mi70*Fk~A6fXBsDJW4S|61b! 
z4eK2>4t-5aX&hFYV9C}v990?zYCsmv|bd!3Hmj96i7B*;!#8VMN|iOgzUgv^Z8nSF$DTInEr*>OV<=hHcmSeZJ9 z?*V$r);SPVmd@ek0;qm#0cd4}DuC)`6@WX+(%K0>C$f5Ez0Rr!vB6qDfmrdqj$MUV zVQVPJPFWfXc?F$M~6bV%-i?eS4ddXH$5LA|e z;^xe!-kN!>xlox`Cu(ld`N=xb$mVFZxJNd(ZP3~5%~-9_Ky1_xI(8vq%^7)eOSI0z zX*>sNy@hm^)A>vvu88xgCzy>g)f007ddXH#5LA|W;^w?2+?rRdw@`V7 zkDgnWJLskGbSr|jA_KWvyHV$=J!7>h1F<*k*lCCrwlah4l%>p&`vD5nYK#f7I{)#p zh|(Gi*#vREWqVKQ{#nEet;J9*Qk=bY^*bQ}yzHeGqa1k2{aJI^1hGQvHq@78uA8Id zkg%=nn30_D_upNZHnWj84+rG-aIDdB~sb zxPFY&8V?zftZ^$Cr}Z7Ou!4@e%s8#ld99mkT-3DiHN44!Yj{>4l2;RO>|W>F6yYZm9&TsjF>ntAO@+0=e!{g{K-$bNVhwmzg-RN(5@_7~%DejOoe?Z95z==I+1 zE^C;9mC=iq<*|HADOkWYgh0_yucZ96Ol4phb;Yu_LXh&U_s^tQYP}zwYNZ8V;T49I z@oMkK<`d+%cB$CJDUkiK>QBB^Qp4#0AJyzw`Q?6`_uR1z8ZLhS^L~-;^-II{oOxoN zY+T8z#PF3$RuzWBD?z3^G0*#1B}j?oPR@e`A@l+FI1Fr|Vf8mp?yX|gFBQ-8-YPz& zs#UR6Jn`7fpC=!yYKc(;b3?lm-y!^(Erq5v8Ufm%HM$SBMy@e%zhZn4k_&B|tm6(8Q z&&ZKMs!S+TC4@4W;C;EB)ip3`3NApz2??#Rc24o`=xDV!jalA$ova0{ha;V>W(ZQd zSdF2pYv096K=6JSG>Y+ZrhZ!4#j2bLC1e2aVhf|USNw!C0JM~T>hid&U93ugr*yR{ zBDklkl~#f9VywC%O+)*3wQ3^^v%6aE*nBuB2dzl_2mg_DKn%Z%m%Ca|8u{MGU$f4c zIEmc&buhG2KKr`$4oHK&@&Tqw6D+zy@Y6EXqqB!%9IPgnif!A&ipOW*FR4^)P7fcS ziK3W-Dn~nSrT9JC!>U~Yj1uQ}f7Hha;B$jcBEf5VPpgsrl8hjS>ZQ)ezj|8FBB$GW zS}hT{dRcEGTcdkfb4?tlY}Ch^iBjL+$Et-O^i69#nnCWH)-;^eZSocr*em4LG0^X< zkRQE;EUb``W33bfnPbt0SIe_+S$)D+z_Y#;pq%e6k=^@R8F)Xjuk|jD4m;nr?nTh@ zZ7V+Jr&w5iH?Nq{{HZG`mz!GtGjSE;dvBwC{wVYCK32WA94ne{PpRwqaBsQ%TmTNf zZB@Q|Q>@#sPoy7qqp$&G)bI;-0*j-O>gSIntA0*qH1lG8H!S5enRet>ETR6d$BsKghVkc&+k0Bl7+~VV^6k@W?oDbq%GQe`0#y0QwgRC~> zwEPh3@u+RFa0H87a7;0r+q`cMu{xVUb7P&GV{wXo?Ly0v*N0gv&7y7ImBX#!rkS-} zK0E>=-1oBbNGqMl9mUAEM_TjDob6sQ3S*6x^9#o98h#F3YXLV%mVa}H`nT-6+?g0j zC&K4Sj@24;oTIG_(%D^1r_*RF4qC(aN28B^D_4xRcw69~qpe3$XL8h~U1PeR#Xr@Y z`#Eaj7kqNy(mfnV?=S{E>3`asYRXY#t@}cKZOXY^?i!1>sajNZS$muni;bsy*H|gC z^Ei+_C^4+}l2ylJOkX447!NjA$dAWc^*l>r;d+Z>9fk>9Ys(vPgE%T-%4f85O{_k| zkDCEA@k?E=h++wJBIzKUk;kBdO$xlV;HMiO`}|AehFlO1;t+HbDR}ZJ{-jw-fNV6u ziZl+(hbLf2IxKtYV9W&TZk%J-IKgTQb`zu}(24Gms-HchpWcz^U~6!VxFuSxCM#Pp 
z$KxVivQkLL11C%-q}k*>UD(RwyRBv_LiZl^7T$W*w>fxU%%u#Ni1gOT$~t&(BF5Gg zvd2VKyi*vtTCSLAC6-eqkc&@w-5BoxUL#KctmElLog`^JS%^DBt;&^rMZ)v$zE_II zc|?%CVB84UZIWdnx#5$nOsGp&OtPw?O&pzMok8T*$rxKU$WxOs(iF*`HcZAUOP~wJAzn!Hk0U=YLMGTt3^XmWYg^)AFiMx<}Dh zeX^(x=X!Z{wslX9C751YqG2lE4X85J{0$5x7#uOBshSB_ePNC4ILEpl?O^sC49;t0 z;T&rgf`Ok|&m%banKi^%AzUfZMN8YfOR{@ii1-Gw3gL!#$vRA~bjHIi)PIYBaumN-3snPcR^2&nr$v(Vkc^$;js4SntDk$eM4g8c&uzBVhcW zthoTSQd2-NQQsUuUNz+sl)P`LmBzBa=qp+|WGOnxT<^@KR=NpqC!3a8AHZ+_L(8qE z2$1;{e|W_ITQ1NMXO?5k`CCSRYo$3i#ipR0V9rE4Hu?b5{9EfU+~Iid3M(z{XUsL6 z!F1YKB>{2S`_>BUxWV*`iqrS3!eBNy>Ld~BDJ`+p7gaEEiPzg)D- zN{;G6*_1sQ=?8{&kRR=`YT`0hLJGqPNp#W)QfZW&^u5*G^ajh(^PF)(oU;t)3hCM# zS048lIN{07k7x1>n0$C;$_8sL;JbN~oG( zIXyQvQC8SyIpO4@+BU06{m>TZ$GGax=nkhKVeU$5^FEk&=v3nI(IxoEy$q|aFw9c= z`YV)vh}^Qxx(iMWuyVu7!GZOnlD6?^gge5(S}-F0`nuO+pIG~Wo~WmxbuPJB)Iroqi=Au*S%g!w@+y$w`N;Y#MM>Ewpyv@oJ%!1vh8?j#$eX+wfPb zEyGWKRc|{SwMO9Wxue!_VE=W@x?o)LzJ4526hYH|v!*d@{5uv^s*NRAQ1|>5-%4$_ z%I&Jj#p9-`QjJdj{kzpT>}s^?A29UoB-fs_8p(H0U`gne@t-HOkTp+2-YUL!`KQkl z(!Kd7(ek2lOSi9y+vPi_t-Eh4u5V9URd0>-oAJk`2+YwOkCJ>Tftt!HQTqvM<1-s;`U>fOca{9fPAZ}zg@?$r&??{w_(cIUUO zcf0j{&Fb0ht)3nGzWQ3XUR|xe9lZar_OI$Z@)zrt2Sdq-v>Ir)winSws#$SbnDvd4)5OS z-J@gQZaq5R;dRGOo!)%%jgGI{@89m#@s%E(t-fBXaoRJTJ^rYQ0Q?u|aPQkS+!+?4 z9~_P#hx-g8jOq?|5q|{50_JcS4)?6xj7Pm~P(S<}?o;ZAzr&rqhpz%~`J(!f?r@Jg zz?gat_kqLs(eiGP=+^7i9&dN*+`dz{H|1Y5)0zgYEX1pZkIS~7rZuWQ^aviD^YKrm zhd=7KdjaG9_3xp_@VlCH&rYizxW44KnRQ&|ewsF_{F2}BKD=v$!!(M&KlYn?XJ(y{ zb!MmCW9FZbJ!hxgT|IxFVFm-`wZoUIILtsiZ#m*HSs05D&j#CVpDdW2R>gy>n$DjD zc>~$aI)xtr13x#-XoowW4@Nn>RzH?I+*kP{oGkF4W_{vm#M#-{$lu{v=l?Mkpz~(* zk^yH(1*QDE??(}T1ds}1r&l?PryxA3|GPKxF|&Gou2I2jRfowMP1q+V%}KM&LFeR( zIcYz7hCXkol3+5~#RE>UwGW++0&e*`*V2$@D8 z=tLl78T}jImP1HP_0RQhejk8u(QG{GMFXUm+1Xzu*y^BW^rgQHvy#J|a{;-CtB03N zv1gLzq|XZuvxURGRlRI%e|h+{W_faq!>o!1_iub@F)z(*K+9h>z)}E*!*sOqpsf}t zAT!$B23Fajfkov=i5cO(U$c#J;95Xj?uA#!Gfvbk+U@I3hS^;r_1@X8|Z`egjC7b;xacqz6 z>}2QU>tsLjC2QrTfS$s!G~0Lf_oyd=`e#l`0+Ae)e)MOV<_`WWPCUx 
z-m;S|JgNKJr{6iuyFsdm05-7q0APKr%O)od$ELLovpy&&KqMkQ0Du{|_gCF&cfP7A zTqcDmyzmSHf>m|KZg!u3&!Tr1D1;L)hT2T5D6g654iXr%vi~88XaGh4z|w4tG#Czh z(qYyD1qEQ!9}fUCd>;S|XV^?!0R>Nd3}U7NLH^Hz35N66J4_6a1w_PiaGe1Fxm*b* zRKo(`E--PGfH=JP6abb;`ThVkv!p+O zXJXld31dRxHmu<$4g05uhAEy32uo&lL<2^6=s3-nr&o8)7_;{=QDh<(D59sF(*wyy zWRG>m!IhwRmS#nO7=AEam+AnF4gh2iA%N}FWwR7tL$g#4VK^u#0DEd$7km8cjESA; zJ(y~k@gOmjBsgwVwMle1sY$eW#9_vPgaWVy*R%n-hcqBl1G0ya0!O<#KEO5&NU$j= zKse?RbhLZ&QbYq)D^B*+{f188?aFlb~fH`Y4VBo`w%j{1` zfumdt8!&XD2K3Z`f=@p20Krl2F&i=M4~=-!&PO&$aFlBU0J;1=S#w!gb6KDO9Oa&| zDIBt!_NT^wcwP#Qx3d-WI_u+$#h>tJwq zfr0|C<4gd6nYm$?ef#!0Q)4|O!I5Js5bP7_;6?Qf@Pe{07)yYK7qe|Z6^tkZynqpf zl{1b2j&ciZz=LZwU^PYnQc!?Y0G0s2YTbyEWjF{0=i!Juo+Q!`vkD0I+}S8OhPzO5 zlvos?5+c^yX+&SsbM4}19cC3!m_Q1Z0VuEm&tB1hP8v|40963kVFQ9S1&`wsn}j5Z zsz4mD5f5M0Bz}F#VP=8}1*itVF&j{HN&~)lQBC;S6Gj#Iwx@cQJ;kuwO(QdP@v;g)91TMo>&3pbq8k+`Kx0I71Az6GdPEOlW6_P+RcDa` zrK#RFpvx~Fjrb91p!F0eL=zz127)DUYN0NPXE982HTEe;#Jz97-20rauZ|n^FwB=) zssMmv#Q27KC|HA*#d&o$0cxbP8JOv*pqtjRd|jIg;fGI00Ku>YQ_h$y)k<($uOh&0Fc}wd(zO{YAOV|SWg>lQ2riW=mdE<<*++s z&m)Y96xgsOvrAwK0<;8Wj`KiySZWV%xdo<{y~HWe z@SI<1d?RqaF4XY zO~A-*&*@y-5~l!5OBaIp#T&;UbX>eP(%dO(tzwGq>ze; zjy7NudJ6%6gli_2DETK1F`aD0*XSigtkj6?Z%BfoM^_uL3jKqCsWu4(sEmm2HlQB5 z1_2-16qb@g6-4y30sYY{2)GI^JuC`^sEU|3Y{WC@5JZf#^Wi0lYKV9X01kfR{?U@) zhZvSQ^eaF*Uc3tc>*=%eTE=ocU{F-dSw;$3_Df;E41B=w;H}ag77&rlCE+j|RsWnO zaqfPS(1Xfyf>_OCY|u|(!11XDJa1D_0FK%xZ9scy z$oN4Wk4<7VNw5=Nv=KL;%p@X0XS6^8I8I;lF##Ewfa5lWHKf2HI;fj2s)y{1uCkd> z0FKVlHsFH-v_FykD1vJKchOEdAHO<^r5a7eCg1M1ls zO|~g000-Je8sKq6LK8qHLTnP>kpze22Y_HJ+V9dMsq=j$Wo#uNt_5DQ&O&DBfou0H zEhMeu*KEbDY{2uj?*6g{6es{&@G}a4_BR)KAgRPpm57tQo+Oy?%RsP7UbC4ij8 z76HicXVWm-A!Pqk!ni`RA8iAYQ^Wf}GOPiTT@mshNp2)_>`!NGU=tK9fiED0a<)JL zSe`-M{Y^He47^}ywQ!hCK;cJHV0WqjfQQIJq@^nJiXN2}f^9+oL8gAo&;mkOZ8ZgE z|3nh(Pn7}Sm*3kIzVA&{vb#V5Sa%r!kkaACS~M7eMQJ9O*hC8GP98@C8?nSj%^r6B>-%XPmb0iOPFVj!BuJYHj7T8c++3pXz4?piom60Jia53=eDqn-Mky6T3(u z4G~QNAp8Fy35H)F2`-rxfHF_4*Us_*i8}?YF+~gRzqu$U^o}8 
zhN8X#P^fv^W?~Uag3R2FlAsb{4=GTl=??%KO#_rR!-z1`YzhhrP>tiyX)wVzyVvVx z)%sn%T-{3&>3A_1Bv?#F%kfFw71*np=M#A+B?kNM+!CYVmbh9 z(#J7fFr10entL?+l>AqdF+ebb<%)Dh&-63Y5@A0{WZ=b@b{Zp)QKmI>t=^*{pjItJ ztOkH>`*Fxk3@c;l&juRhpa0%7<$_0t`%g4p+2QEm;L76w$<}85ud@^W%x>`Wwi`;M zQ;1A_ztv9I0B212`iojubdV(KAfnI)9Q|7Z4i8W=Ux5PL1;BX#I9mGMr?J86es|dD~SNGJZb~L@bJfm+NLceg$8^90LSXq05I%*CB;zcKO$;3 zL`)+f*izOF)-C1gW*Pr|TB0ZW2x&CJoBM!ZGfV{yhUwsf07yYh$)_{# zN#R}qp0>GYWOMO20MtP$Kw|*f1Hcma*luhuOob8;9W(nVNhtLS5FAinJ+EimV+c8# zD*&}Cg8*O|l}8#3E83d3W2Dd&5u*TLmu`(O8UBEfEnWedA!3%D#INYMaqL7bFYB@R zI7zq>;RS*tP7N(XmSKScv_M2T z8}KLw>r#O16QuAE0G17?T~(_w2R^7*JPPnI0JVBz6Nl~TFD%R$_8F)}!ILEM2qNwW z3BI^z+D(801$Y!Mo&kVEa2&G1@LveASD1Z@6dprFR~xVy^+iAjh#8Gh5(>}~o!Idn z5F9?-_Bh!^kCWM_N#b$57z_Z%u9E;T+}M@9z+IpKtq?I108U?3XHt0q@fRVtM$eE! zYeakw0IT6Dwt*N{fKdTdas_9U5~B@b*4c>X$7)1hgg|7UC5g5G90Y*F{7?39pNx?L z>hl5xcmgjj0YL6LV18!!L$)r;+CEPT&m!V! z0NA-Dbb|~%S;z;r!wVGRImC1Xg3T!qHOa6yR#TKcFObCZjIc>u!k9@Cbbg`Kx_trqt$eK+S&uj68l}*f06{(3I_oo`!O%;9yk{rjIxdbyn+{}Z6;bBq0$U# zexgOszewR#MEv6cf>L-dtc4hMN9*Ye5(?1?5oT|l(e|lY$@=Fy&BWg%(HVd!0La8g z;F95f0H_pKfG&tgwgCf2X+RM~8TO7MQt)trQUeHzVi(cD81@H(HWUi+8X}t8Obq>4 zm-a6Juy7z_&cgMtF| z!i#}+5*ev_AaU%%CO&MTvafRn+awx~)Fh5g^U2}GgfIfIJ8vxdSkBPb+OvP%HQ|q-)+5zz<-h2xLJ5$O` z-KpM2$c}sc|Iu|O@HSQN|3BxrM?{9CD8n_+LK0Hk3<)8Xh!UwZkdji-xs-XEhl>oC zq@>JcDnmun7E&R}EtFD*YgYKbKhIjX;r{;L`+6PgoacG(&t7}&wVpNXbq-1N!LbQI zF<}=iTWS8A-E>Vr^=B~r2%y?}HCb0Wd-LSY3`2s8Qs|4}7=UJEE@k zI#3eNW4RhaH?b8P3Y3<-&u83y#naVvl#bxLCf=IY<84Yl1k!OM~ufZ=`!4GpS6C_O^8W=0qq@Dhfn05s?7 z1E91k3e1gzSEL}x;Q;E*_A&ray0()m2?J6wO!Eqf=NNiQBF9S>XM@3#7>HphgmtY% zE~P)Vb=!^%7=&RHfG)Hk4MJ%FN}Vl1!4N6DjA6G2(x?&whB7@+qch+Y3`cv@zspm> zOAIUH)~JU{Vla;QQ|^|RqcC;rS7f^Rw*f;iTmhi!cG6e7-Nf>4WMUW!)raD^7C_al zim!GXXfY-E98GG}IZ`?`F)%Mp=7H*wvW?VabXd`0xEpeqYRFYTyPDFUg+tZ%;R1%^ zXbBJ}&H1)_iuS|<7WYB}M&M`*kVM*oYg0Od0I#i!Bcw1A!(CF)&8l9_NxU}Q^$|t{ zI66Y;stZuplaEWA0RYfG{AEn=(q7dKoCe_3_xOta>1Lyzq(AM z(zrBuF=8x^kpK!F^>HZO!tCiPu49xW#$lKOpb}nF)ivz1w??&7Z>xa9Z{k!tI=S5q 
zBRjXSfo3pTM@KbDS`DE(Q2c8*qTUK|BN;FvhGPqWDqt=ErIP^alYudk$bn%WfbLau zx>BVpn!DMw0V?_<0P1?4$#DJP2W_IZ&u?s?MH*fGc?ezHM`heacH|ngK@P&LL0|f` zlW0@fby6P#)Fu<-1Snb+0nml^58Z`+cbgkg8=#)0GJtY7hs#i!x247{EYggZgnE=Z z5W3B6s=AwXXRD|!yBVNaQ&Rwe9jNR|hXb$>6ikqUdY0AzTEp00R!?de404VpT!sNU z=@S5oC>4K-T3S6ptLA0}6D6S%NI)n(#8V-qM<`5{gaInV06V6a4B&pSZV*hCgu>OA5T!{weWBSb*=`-x z3T$YAw2yjV=XwYHc)I~XniO<_m-lfB56^Kxngc=ubk5oiNJKtiB&~DY?j@#3LKU*T zmzWqhAcvPQK)q!@51e4iDuq?t4y_YUm4YJtGyuict^Hh#?MGQMR1FP~V`(4ON7R2$ zr6`m(pd=b0F-;O0>V!zh5wdikvxE&j6|$zc9Q~T9JyWRnS+ZpvZU>K&9TD z;GuMsYo4J2#T0)c&$zN`Kxou;6rzE#79C8Ngx1VT1L)9iIfv3?&8*Ud2B@CZ0#Kp0 z|H3uVS=@3p(HT-uv~30;rA#WO()<)vV=iMxLPRo_VsE(Xn0vjujzyJhj2FxlPzvBd zBvhygI(4OGe6=z_*?s{)J#y8FZvOHSxum-ZW=Ww8j#L2M*oB8&f0RjKClqRoD2r#3 zCtl`uh>So01nH6}$LF^^@E+ZQfR5!#V)vHY6A-t-&q76@}SSsDL2@K$&UF zDM%BM_k7(pq9UFgecdRgWMkwF6Kq!(dyTk>;q^I4!?0J&Vp^-T*rwvEBQs_zF#G~uw0-}1jXr#jO9Z0EJ zJoUYsMBP@&4RhX*6vdHQ zjdIh4onZx1=}y)#E0P-lmK!4#pZg=Fy1SBnNdl1$#-5~XN8-BVE>DT>8ZO2%MzRYFDf)jwPT^Dwyk&6kbhCZ+tN~LE_YNS(uD{uvB&>~ zg@!4Pul8bLKtoPJGa>^*5(Y*4cke)*6n$OIiAvk+#V)M!6FojE24KC=oDUHSg4$Id|o4S zVoxMxg&=)_D%2Z`?z{VXy$%fEq_?<*n@ubhq453#c~;YVoE#~wM^n%`zX1yG82~!+ zP5`CppSWS>5-F&k-UpzZHD<=9^q#G5ecXWJYJY!1==Scf=feAAsj5lr^N@tX`%H>O zjj0VoS)~Q)Guz^L z3j(Qb^Sc7d;z$8d;r1Ejn#?2lV>Y}nKw-WUfLihZ-=cFc>eG6{dr~Nm)bx{Sipj9k%pv*huyb1uHnDv;u!=L=~Zb8R^zqwmj>?A0{R` zk(6~J^#!!h4ylB-3mH{lsh&{5!tO;oM)7y0@~*1*7{{n!f8ZV|eP?GR z#*kv20K1!dQ@RSC=O7i}wnHjCO4k1)Zigt|tO~T-yeiBvw=e%dcl$23)FhW(BRXq!93uf#4D~5|rF{W(x9pFMhDKh?#|fT%pQlUqwE699 zXs|}2H9%|nvVF*l?y}*9(~Jy5RHQv}Y3IqB^esC5CC}NEGk9M>EewMIRBnr2a^+V6 zpn^J5Bd)_U213EN-DEdecxQlnJpO?sYU7vzpy>8p9XG*$g>e&0MWF%L<5&!!COPU2 z*Cb!)V2cRBS}D}Q@hX?DrdXIZt7d;O@sJBuM%;j9jwdeh#7_=5EFhXnfv=NRUC8-a z1x|OXT7O+fTWDM)AW97ljjV@f38d~_glMd26wuRM6G=G&kg#eWte0qgTr2$POaJCF zIJ&dtZ;=42!$#f+xz>~8_B!&JM~pm@MFtzB*#L5*&*+!iT}BV|v^n=80f|fX%{%*} zLWXwCO?=$!vpkosL0N9+vn(K*PBDBa^_%f*hg1`qdX;O_v({VJ8w#l05XTQ*>QCCI zr2c$VS09Nq;uT5zZf}`9iQprt$*><-qlcMY++(+@588vG#R9BQjZA^~15&kRG@~iC 
zvd{0g2!6~+1~b9O(!2%FpI)7+Gpj~m-1!4Ita*-stkn2-VWQvv)w$q#TD`~0nt*ZelFG4kUc!v%9Dw4 z4qYN5%B(yzv<>J}9!u1ATc_SLuCMHgbzHESb>1>{t5 z5mwoBBfV~X*bA*P--}rdp^<33f^12@DkhbQ601=5E5z z%nh_aWk63HcaaI@uJ>A(%aybb?a2C03cYYtM?oR*Y$=WBDgUOeHMwH@jff}l)Q8Y; z>DTdYxbz{juFH_vEuc4!TLE-SJ5h*AuVxsc4G{)Bg`q8gYQ$$uD3u<%)it2+rSLR{ zzT{Gsdw6238$w)7Hgz3F^uf~(30>%izKJcMiRq5+k;F4NIsoW8Zeog~^tov^;tvhz zi=jJ!+WD_6_9@NtWK@&<;0Gx@i=h`lLj8ENU)`whK2j=RXvA|kCXr3$zaha!>3ehB zWN)t|`eAq#K*4b4w{CnhiDeCXwB-V<=P~kmi2gpiSMsb(+1+xr4a5>bhO}P5G7LiV z?78P6mOcW3R%pPBIHq}t-2jpp0-(j@AEnSA!#n^rkKW(8sl}x3TDxPvjer3-mH_C2 zKLb$uO!tIqCqIdJ3ClVN71pjR-Oy$ZBPP{Q1JqbQ^T2)&PYS63*_Q=x=J}4= zs(|Ey0NZ^gzNXpdljLR3X{V|Esnze7(jY!oe97J62MN@=-fww!#+)uR;AIRyqNH|| z!2>&`J*j4THh4e^uVDBYK$WN+w@2x^UhZ+a0fRC8p758`wYsY$-(0ISCHPqqLm*B8 zsE*%O(yieC%F1vJGGV|_92Wt!AoF{=TaEgQlu8u*B86c%@;&eF%IG`XUFkgA)iMFO z6T=~jLNK1lkR`tSGA+WVoY9A<#(f9HjR3m_EJq|IU4}TnWT{EmN_`L(8a5KFgjX|c zpjXSnf?uT;fF+w+A|+jx_!6SMK=#C`w{Yv9MUBCKgR^Wqf&M}Ol^*l+W9H& zy1%Eu)jah_#x$uHTXX`S>-eka;G=R+#y6DaIRSPL{}4G5vYsbPda}#YWCe7Q2rGC) zCxJEq)pg!`nTzD-a@p|dhybQe{!bmuVJy`qV`${nb7#0iX7D>*zn3XmEHokwqOlh} zz))ElUHCnAh~~i&G|EoFa;qo4@I$drxZhV`*B{qS3=YLsp33Kz{(OZWaJQmop0t!d z%3PTUjw(mfAXtL08F)JQqfWo!im9a?rDGylnIdZ%%VsJ|GqHE_hnyPc z>bAebYXsQjSZ`|2g1rCvr2KZ@Zi%`=nSNx2gC~h~J8AUv8aL%lD|kd2M!be6w?KAc}jUxF-hYNxMlza9SE~LX`GI9#3@0llHu67%?BBf+x;07gC8`nNoI zx3E-&(1;=j8j1={@}wOz4H4BAV0psd!X(;}x|55Srd2yursy5PSrKnzc@jd6Vk)IB zViRri8q!;`=Dr2~sjNDv_B%MQ^(Q{!E7J|R)6$NWN$wDw6R{AYme2fi{_@%pNz?|C zxkt={^oFrsu1G3x`BPJ{V;Aw^27l~>{@99n)ADDk@cxwgVu%KwXyb|B@}#vC5gM@s zBE=IodSZQ^w4o4*;4f(`g=hw$!aGLss_?F{`1P+G+GzAYIn=SGm%%g$p91gpJ_54q1-baxTuTG+^Ut$)~F)d zsiQzw>-uN;EZ6s0K7O0avWQr!)^YT%)EZ>xDa)k4`y3aw9An^*8go&gG7=1w`ukWH z`J6|5&hNFHr>qm8pFc~4hJFD0E~v(XJ=qGObP>~4mMt<3*ti!MwK*OQ*({4Dln3UmP@Uj%v%l%#x-912GhpDUo4XFv-l6h+hjp+?6K|tc&>X`Ks;;mKUAkt7V`KkwHxiS zG{drFXW=wiXAy2^vE#Vp%Om-G{t9$-*FGq1$*D3|B+h4{!7yb=y-fDUx3^p#(|*pS z0-~whJR@~aE=N;U=yiUEN$D5I-S1Kjmz0`kGC*n4Bqpdz^Yb9>Ius00^iA9XA#z%A 
z*F7I79*ec4Tt`SkZQwS57-=el3Z-`@$LyEJ1lYIsTLS2&#_PM4g%JQv`D}|OBPElE z4{e|bypdp=g}2DH4DG2LiO>jD%`Tqk?1=}Tb-VTgNob1vB!EJB#arAqipNN`bJ+l0 z{>uO=mKL;dr8|qpVs~*ZqokmAG8{nQ-7N1YZFff^5^Iix5z1cnht<82P>UKZiGl#H zBB85&?s|6{Un?Bbls_~;L)KXUswlk~PbqzZyQgP-W2B(fkU2gRT@G{86c?X7YhSnt zjnD-y_Qd*w?zVPtKrmJkg#p%jVBj7Hq<94bw4M@rVDDB3RQ3wvq;M6$cOFQ*{HY@f zc?l!br5yCc_O%Z9>x}za(0EA{1vuq_m&QBbfCEAUt_Dcrxv1{ju90r<^A-Rm0>K0+ zsH-pdA{!h=#Y zFj*3%Ff;^EQ+e`x*TF4Efel+r1z3YMQthv~m)gS|Sr^;?Q3OT6}QaC|5jHG81VBO_3 zNmatrAF?EAk3@U795IYk%MC_U#`7wKn$PO$t{FXfSR zdye9*#b^8DR)r$%5$ku5s`RygaFxDQ!I-`T@}~fsvdt2C9mHNwR`+C4N3IcIGpU53 zwL$m$oPN*xj&k~rFYt6}U60`x00p1A!`<$$)}$<#h6dEZ@s~eYdw;Tb3dC&Ld$tsA z0En^(E$N2-jQW&nSHy!b>xPI6uc&>8v!bFF8dYEM=H6luX4UvY!(tmG=RwdRQz5#1Fd8Tu$6|n z(z*$<9$MiDh%T=1}hH`fjr}P~s18%|bJb>cVz$pQuFbqeAWGpn3}}wyBM-DC=nFhS(ARF`x1?|zhU|~ce&OD$lQy6Qz~^4!s)TQs z-@jx7qr)Q9E`tTqY6-d-RD+4~R9dCgX%}4aA_2CBWMnHm+r8-WNiJJU*f&F86ud2| z+X21>P+VNWh76^5U&XKB-A}%ZXpQANFENefHeK0UgbRIX;2lY{!LY~Y<}bczq};44 z64S<-B?6+UNEz7{&pv;$qI5MnSs}UyNiCGr9T-p1CNp!^UKOQfI2HXo!g>$Ga&0%Ds^b_VyeBUWpCWTHIYWv3- z4X8IUJ|8I-i!~t=26V|Or+U?0l>^FRa<&wA$LsNepd%? 
z?#Iv^Kpj^`F&DuWGkB*V92XE(vwT-lT_D;(s>|qi$aNV7=qIR1W?R#SWLm*8=-k>G4;u6v24{4t68J ztJtNfk>FELM{PW6z!Mnm0LVjnCld&zt@_v=!Qg!<^u*BF1Iw8|2>28QO_U7ih2aqo z^xZ@K7tz!a!3UCf62sG;*vM0u(-`IpQ29SS-Vq~tW=J-Jb&}|VV;zJ_;BM}v(!-piGS^|iGZ;Se=Q!|(Q{X?s zGllh1=nL?*2V%Y?ru&jG;8}pgUQfKw4rpbws4s~Pl6VfoZyqS-OJcGw2?P28ob;~Aw3Uf^jtD-I#ETfN^g#JE2mIuU zDm0)!Kq(Kj80&y8zAQhM!T^99091x;KX+yMxGT#Fc9Ez4`fbpK)&A8KS>xd2%@hircQ9g3a$en z_(T#f0}S`Tr~W~6Z3lz~yaF)ZH?SL%T|YA7ikN++{ZlCn2Kdi&W?k2M+Py}hMhwA_ zhJ>QfR=>(xldgy_y$7F3VknO6PohgRFeFSZ6%bV~Y~(P=SzfB}W*5+|AzneAHB$4h z;B$!%2Yn4x6Yt>#G|6B~>ovBGB{X0JhBW{RpN(iQN^bzr66Y6E7>VI?0L@0`prCYH zQMZ@bfB?g800p1?8(i5myCxCSYG0-ZWjqLZQ*so@4<5b27h?yCu_kscrlou!EJ_Cx z>Ww{*{1IMdR@M(Uy0R0S+349UGT1C~48#FQ4PEkU&%Mfj3MkX%RU)kP3>}N-5U46` zJl)lX$&@oA!Qe}2jl*%$13fo7U>=8l?G83Km}Wn9#83$XsP54n1bPdJh>0PlEPGg z+9>FHUS~3;v=P_CxO9<#L^K&KL-hpl27j`czl+nT`L{5$6p+LH?)i4!@|$M-&jPTz z$oecPD@|o6s;Lf{TO-IWRBwt-$J!WLGoyKX+;u*h+Xf(sKeN!_YoRkh+JLGvEkK)5 zx}R>BO=YVDhV6M)S~k%78f- z9tBVxd4n;i((;T}+o7;S3a?@4Z3@)?>wF8VfCS^&(1^JZeIRuG!^p4FS8};MH9IBo zI)?rp_=Y$oppyea1Lk2E?18ncUkIq>6?RGC4Gg0bo>=&qBd(%u5K}@U-h@a4P_=vd zdKbW+=Q=d&{7w?{al8r;C(X~As?ysTq?SU#fVVKD10<7P*G~I+P-5PC4+^`bkXV3a zJ;W8H)!D3}bjVfLEL%ZSDF?FxadGKwTCyg7I8qyhu@dxz83$p z-~YQkIL6|BGTDrztQBq5S8s|g=di4&z}dD5A2X8peL-(>6o|0FCCJc0?{ZMqgW)MR z-S39G+A>^%xuu94Q9aaKtG7`_486JTJ&wwHSp4_q;k0k_bB)1|02NjMWIZeHimF;l z%d~<8%3e_{6i{m=hOB40hu(1E>2!y*lmcZ-B!ZtH>?vQ?Gu1aa(*5hd7rFOB)M^cAau+TEZ&r;Zc;ST?tuA`fRM1~HswW!d5 z4*~2!9+4^1?Y~d*Qhz}r%Iy#Irsziy_n@T)*T1eCa^yG`u{dbJ$2jymy9&AwFi%#x ztwqEZ1rJJLBZgi8x~bQ*O-^b4-nQVqH6fy~z5y(2>4Gl0T+YyjU%d!^)!|Vb{Qy*| zSEagMs5&ceOo2lKbotr8^*Ohe?I|1ko8-^TcJd#x%B<7$an2BD!u=-f6AT7)p7u-< zmHu==JM7#&IiyVJ90xs=??S|&_YGoClmJ!wRC0Ma>9cg=N^c|O!DsNh0Nu^;01ADJ zSejEheqX{KGK5AbYx5vf(%m^!>9wR<&-??45-Qmh0Frw7N>?%R(Zg2avj%LTD0?Ef z21+?<#}|o}&dOz-Yj7CAe$acZ2NDT%L>z`FNlN1hMcGrq4W8nEUNxvMkh+B<0yJd% z7(o4A83yu7pQvuD#ajgw{{pA7yx|U4DVDR7voN3aN0c&MPXpF3A@T!1bi`#0XkvT~%p$^cd6BPiJK z16S~OvH@F5m5~#YP(Oa$2$qmoThcAB$T_G^p#duW(;oQBuQYy9(>)kIDTO=$**ETY 
z`Or-^4A3|&o=A10j}_b_o%1hlpjzH3NnC~@AAm}s89$bw^zLppJP8fZ;HeOR?(%iB z+*;-M8k(E(|6!qEU#l(Z74|T{mqHa3tj0FtS`xyN3t%qcvFClUN5Cp$kK`Vx%5p}W zT0|LtzHUSkop(q%*VtIdQLHsijjs}%%DExKCZ$iosm)Qv??CS?dL;|#$zV+nHe>iC z@YcUU=^3Zv>Upfy9EV-{H>R6*4!KgQ+$kO_HPvCuy4qLVBhhGbL>Da3zGi!yft-I0 zPwREw)Cb)UK>R7!08|7mh+0ZJQ$bYpf1!|E6hl`4 z75T9BuIPWHT@~UpElZoS;rOzMhFT9>W*Eo%yrhs9^(N<)mumoe`Mlie^YRjT(d7t8 z);eK7f2yk}9-VRr*Ikj6za|J>kWg_x4DdqDolqjO4khM=p@`%X5NGDQtC~Qq)>W1K z`vxZ^qjXW6BGpiz=ZYw){5t=ALvp2}C$3a7_A#Ej*5BV+e{%&QQTqQNbu?rt$TUwj z<{G4UlHzhzCfP!ffu(WG@?a5vlgId*9N`vg^f3@cd(Qg?pu)pKA0`I7FcBJ17DwVO z2z4%((Kjh=?C;?qNtDCzHh=<36(3@g&se7t8c-g`G5}SNmj<}~Ln~;2RmsH5QmBCA z1Fz7U*^`v&KH$EMDj;D$yZ*74n0n4hq|$xc_8^FoYO7xY=&rUr>hA88iCGniTN1va zGgZdD-D4ZMb;3^)khF1ouz)HUb^<8wETUghx+B4xvPc+F6-)NL?v>lSNx%?ER0G)W z&oR}HB&KmLp3{d0R0sGKK&|7QI&Ot!Abo68GBH#N*W!rs^>jt9dx)#{WreR#xgGgN z)WDM;LJ{|8A9=eyWMkxDm?UcADCB{g%e&BAfV*}TnJ_>Dx~n|UvZ4dF_jEt%G+YYT z0r)XVB9b*IImLw%-M5M;d<};y9LEQ`u)LA>sO=>qba)g;!G*43^}fpW%>8<)v803s zs2-I?K_Q?cJN%WVHgU^HBT=yb(Cj9FOBDb9;7OyZ#X+itnz|daiYu)$Xac8uIe|H_ z(ieNU*%@zLHy$WY^4tcg1{<=hue8v|Zp3MT3~c};dCR?SmRa*-o(#|_2BRdQJlyFe zp1#=)8LnqQSdq&zKut3HZij03+P1nQ0<>`=7%eFkT^FR(2NsODwbB>wv*)Bs1=wP} zk@oxWkNJGvLG4!l=1~$Q(HbKyY4ifn70&QoSl8aRsc?w^+pb}xDs&%z0d1){y5v{8 zxZQDMC8hrKc>rZ^ejl5PCn6WtyY?)iq^jj0FVUI(fRZRjr>)58Mx)~-q%q4dFqKG) zaV}WB#nzs>tWFBB>(d+ie@MncD)+xImr&aFZv9}i{&Lw*FNssnI>~cW>bX0er@Pxp z;wr>=uu|$+r-3P^{lXSbnlaCJd9Mq2+-R?ShzYSfDm_lQsK0R8GfZQ~1z;+dd9S#s!b7OJ>~Yp$ zlGN0>E(4=55|I^UT&XZ=vIkyHPFyKPk(F>7p5*<-?P$B|tH|FoHg!jn1*(C43aH!H z;V!pPs`qxcxdBr|LDE3=wci5j1~vz@CxiDu>QwrZWSS(^5)XLEhwgNer2wxt%t;!k z&iA+n-}}`qWTlfygMG%+r*vPi#|ob$`mGkt)^PkgCuZe|C}j z4?y(<`jg~TNvd261FC@TB9ls6{I`HY1Cw!-160k==@Uw@LECdpEKs1B(f zZFvDV@EJ^ePUf@zlt}|i^P#TKWbPkaCjaEzP6^ef* zDbjux;|@r*x`C9n(gBdA$=q}S(*EcFHgkRDCB$uj(17v)T~ScGxagnfmY}(@{us=V zLIoV%0aVZ1)A!l`LVY_`PrZi`74h_d&`p@y%vJH3EOcq!H4}-_x(h=9RExLX=_=U} z8o#d7{u&`A(z@gskZL^_AeAPab6-!JC7_C~f2k)9dSV}1DrFI+RX08X&@iadU2Ztk 
z6bapV`%}zK7pcB^w<>|z0q{?poFg4|)fd22)tmn0s{HQ*Y(^a#P)qkeCy%@( z(Bn@hbbO=rTfu9RP+y%FKzDRB8*r75c+~FKLIJVV2gsD6wQ=PKRR!D2yj|%Z!`(v2 zT+rfbyTt%>S-TmtlO`g6@J1z~;t^H44j)QEsmQ-%r-jmi_p6fmKgi&9}tY#q$X$mD6h| zD!m@XQf&7Sk;L_A>i4Eje-q9jUUG+*O!AVrRDMzxYd>T$(F;%V!iBuBfGD>tm@l%S z?*D91-T%7OG^NXEu$rnH(FnsE9;oSoGzSE4Ng@T{9RL;nv^!i3%yotRdPQi!EjT^^ zP|RNdptJ@6TOxu52}v}@^1t?7ED+EH;2;tzp)SM(rPnY>p*EeZ$fkHsLMnXz z!pW45fYkLE&MW{3cpx_-z+=?MTOI?c9e^Js(FCT29@X&zfIs!l= zqeitOmbN0*V$4D*+=inJfKL7(^BSdNSXj7{Oc>AtLp1>VvUGK7n|hjR)HYS^MM#ut zi6Q$Fx~hyk74-Di2q}RjTk)X*Qo3;s{r&0wpGdG+S-KtfEp|q}-aOg$7H{&^j6@BD zjA#wf8bU#`KEqF?oh{|R3qzuK8w?KuC<465nUwxSo|Pq4Jwf){5!u%w&oZf|Y{o7H z$i5bti0tE0CEie88`?k4IihF|e;{h<&oQ^9ahZS^j=Ims&5`wA+kXn6?<5N-p?=S$yA_JjANps1Wk_e81lE`IG{8VnH{p{lnSK;ycW z8E#FmFPTtZxIz-DS(^ZC`1-aBaI53)LB9d2qdPp1?+*uzX5E%2jmHHf^hhOGDJ|8( z|Fy(xfLh(JXepX~Ot@9rzmoeY*Hu!G;WU5>v&E5!r6tLYYKH-8`ey)?_GI{zpZFiS zjyLN0R!c&qcnMogB$1yhR$AAVc=^dE0lLy$07~cl=q_mT8TXS0YXs;<6$MaN_y|Kx zrGrUtBNGN_XkG_E*ZV}`Cf629o^=1M-TRVIk8=xz>cIW%E>Jq>l>N?hXn@*A_6 zb-+FSc`v8y0*ufYcdZ9HAR%ziUv7)Umy)Q9VIzS0qudNOln$dG(!kXKjcotdPLC}% z{?gF)3nUcx&N7u$x@?Gh#JPoY*m!m?fTHpC;w~!xNk61QCb%CL6ibeARa=#%2ADIz zRibTK090C+PKPDcMFMQe$jF;?{l9^#N4UrcT`6a_{f*nCbu)&u0P5FE)6FXFLx->5 zMlzrwhQB<}tCIuLC_$d-2VY5{5x_+cH0a=fQveLJ!>{c6Qy^kP-GpqBUz^)=-uOq?k8qAjUz+4Z!1i%8}F#(pZ9nxw6 zSqW0(nhKmz=>g72AuJIfGBmO!o|=|7?*9dbJj&#K6o=M$cS@@jh8v8aD$RGmtK4=L z_Co_SpuNcht$92vg?%WfY3-6iYYdG66n(m1;-XM4>Z~rzfHpWSMcHk&j_>ITMh@0&*0KXl`!o9WZyost&ARDN$)3>Koq! z4R%Yc9fo^6keF4{5p5h18gVB?Cr@nq-Bl+xK-e>h?=ci(`! 
z0lInM_)89Wk_>a#jtWRB7D?6IGO8r-I-w~b zng9(Ay${zgkACS-|A;&NaYKh8`lCeehaBz6@&5Fc-07DIV9Swey0KkgC&H>N>?1lU zZOf>jG#l&%B(zi={3O{2a7_nQgdYZ~v@Ihg;?%nWtoaz(70+vsnjWl%RQk(@_N#j* zjm(ASK52Ht^R_>I!k_-o7(4xP0nr4f4-I_~bh$tMoBs5jr`zdI7^=pwU!vV1*Z9-- z^{3B+C>4CU0Q<_Fkq<$B45?zjibsG-$1|$ZJwG6+hZX-n_Zgh!GdP2Oobp*N!ghQa z`UvD#K7S?9Qt=$yC(}WZoT!P8um_N!F z=W%NKGNKCOFTHvA+Gwm7hs$eI;C#c}ENcX7^O{txV{p-hoct<4VX7E_y2?*jLRETC zd$&GeKn@I502KMQkyWKro7+a^;5R8KBGqv~BC?q-QN&gVeQDkZwayd>T|<2)PfDwQ z6p4)^6NeR;pny;4o|1yq9LicABfNsl=g9)0X zKd?Zm{=fj;pD_TsEkE!qR_QCxTVV%B6Oynp^4To%waey%FS#$092KBSGT8%(&mB+) zK##eP0p!-CcbWky=`zGSAF`kP+G-zWI!}h7H)XgVol@M%9Lo6j(&bi5AaFtg zs&Sv==5cwt4DoR@ZJi@Dud3Tmcv@ud<_;(>R8pai+H*3=`zGm-C+O!MQC1C zpAy5hu$7Ww-n|T?wfJ}nG?81C=Q`Y%bFeJ&5%a8;hWe0ORjesqVq1oJE7W!W=gVn% zRlj=Rt;n%5#6O*B%iW=QRaM5~Ri@4RIV<(b^cfwiiuKNL>Oa!yvcw-}ET>I<=2o@Z z%E46j4D)X1zA=Cf&gx**sl#}gX{5^#pMItLNj&qaKE+08GjU3Wd0U-wb^e@=Rb?uM zSLZ#a`WHX3#huvPx~&ayGw#fgA^vct#g^bt9jwKZd-0O3bQ$6o87`2m(7c*B^uwzw zHt#k9n6CIQ9jjH7NqBXeWQcz~m-W{g9Bgi_nJjni40CU4=l&Oy^Ey}yCfo5U*D}N} zP?#z$^J-#s3a_r%ycO6Lp;mrD$7-ddz)1a{94YBC#A_bZLrnJESpC;JNlo0k$L3DT zXVSq%9n2z0@T!0*?gG=Im53h<^hWPg^LaV6Fn~e8sylPJ7tulb-#EA## z*iv|pI^}d3;;B4`;rc@Jmc|CTofCugDc>k z>rX60{OCLG#OAGt_kFy&P3GOV-8ID_I<^wty?D71=`zG0VK&JHhUTq|Hxn;4Aj7=P zcsfoE2oghea24EzMp?enWr#nITT>15R>fN#FEJy-yq#LRZ@~`JvDNV2g;z~chIp*1 zYl`Nrj<<*NW|+4-RZl;)GF-=AtMNyFf3Pg^?r)kqGxXUfpB!uG#P6$0!}E<%~Lb6$4~Q z#N)5KU%xcB7BlX~O+d|%A>Os5dww`t2Wur`0A9jNx(xA-e6OAG5}H@57}N0T9-Fst zTX&Dg=-3ijQL%hL4cbm-1{38-+hFisD?giXq_3Ptxa0=e8 zc&P#DGQ{J_?lzhC7QD~mRq!$Iu`^cZg9$peG2Y>LxrymA#DC9kH!(DC6TH*L@XIue z*fPw$nHZp-NtmdEo8n%~!FbbUh{tJcT5~aPGrXJds&bikK~wu-fnbu3y%q04yj1ga z8RGeAP0SQS^EStO*;p6!CXHeI5#LO%Y1F}F9ef+^lDKt`Wr$a;WL-#T-WGTp;MEnI zcNjH5;Wkajw#3^8FEKG)hWI@CJz`>L-d1=Y$E*5b-q#25{7XS#iVnUVcYs^>Nrw31 z7u|g_Z)?1-<5j3O??N|92&U@THh5Ryr99GQh=0o*`8rN)-nMvmId6t}3*F;NYuXsr zzjNP#`%iyjEP3L2SwPafSs!w@!+XUzS24}Illq}h{i?iLD5#4!CnuI6egntSwnFo2 zmEdOQ%`op*WEyX#KG<%t=?ST5v7iYYW3}|@QaMWpNHHzdhbW;u)SKK|C1?efmohM{ 
z2a%tfoSB6MGlXfapaWQL)MSXSqTixqLi1{&pex=>5W$eh6Mu>>fO7;hb+DERdf?Ut z$q>)j=Pt;+S|aF$myBkZxA82S03O!|`>y&diD-S`S+L6xks-c{7(;MdrVlndHB5^G ziI;s|jJq5;WhGrIS`ZlFyy-H;pJq&w~Qe z7}gDJHCP_h49l^=;=ze5EOj*P!ISr8`e5Z_Tz9z7;kXwWh6Qw~ zRG(n3R35^+6|V|ehWO?r3r)-P!9)yu7;Fz1Ss`2A*y0GtOwW`FcrIDzcEcvvCospqj=BbRk)NPe)NI`kEQxx#ctSRV3&`#$e745 zZX+B`F4MS1$h{%C$KlGrX-pwQJTD_sV)Igch$>HpJpon+j2#3ShRrE!1J}$fEO=9D zJ;81VqhY1X5Z`)&_){M_eKI-YdcpOAL;pt^k_ck;$XgZ_%$M4eP@}z?4DpdC?MIoG z>Vt_H)*Eb*6U#7c6PVOI#(zEemgJs-+XANwD?@x4b6nc?QhhKv!=47)?XV2Pb~!9F z3kw!Vtq<5QV5+Y|E|o4!IH>^>Ow4DBy+5>Dd`4V7`_AFR_FjarU)`QVGWEv*5cck_l*wtXl+&c-$4Q19wYg#6^Z5=hP zA6!K^T49C^@eYA)EeaM&<$1hS@#aTGhIp;25e-|G>O)j@!>|{?Zgg0NVTCCPuuP9p z{);5{B3uhN#XT9~otbCS)|Tpn$r;uktSuO^HN&ve18oT^SS*nNcst@Hwx-Jv{}wM< zS)mUmV%SSy4}h^?mXR=S2bp4kpP9u4OC(2Ziu8t43zs22-i6W7ys3Bx;8kgxwa#tnj-2&Zw7ai65vG%M3_+`MIydl_sI z7y&a~hIoD!TM3v;^&zTaH|!O#RbZ-EhD~AOrHYk>1CBI0Xz@;uXGd0Ye{5 z&bT4S?e%hojr`TgWnsa)k{b$k6ijtOhWM)tsZ=NQA=-${88!^;BA8~$3FErdu>n%1 zaa=_3p5%tZ6`SaWtTMzafZc$cK16k`hK&HL55{fEFf3PD>+LcP7sNDAgd{3|)xZnj=b8 z23xi_&uFZ@Kq;OKGYr_n{9n^LsjZUQ7(NUDQN%UFVmdm_zaUo0V6ELeW3i3}WxSSQ zhIi;XHR}OAn}r6er8*Ald{EtGGvutOyDT)3vPu?{HP?8|Yhg7kHpA(1THtW9S!l3E zvJce-69M%3!MInTRzLlqq$F8Lnl(tO*?ZUsKLzvBCS2orL+)Nv;gc zaGK3SS!E!D$(m;}XmL=rK{H%nZ9;{wweSxlmd1yQUd#*|86ZjwBE-P>a|)ks1kn`> ziC^-pE$L+HLsYeKt(2yM-2tZQgbeY^pK*_O%sUNlH@xcR%}|^&9M8TtM5!rg3qL}2%$z%u4VN18n+Vc5EzXh!?0Fh)sf4>f-fbv3M?|ksmTyO zm+I8?!PE>}4V53P0PR1+xNm#d(}v6}F4!WuHE<>2R1ai`zsOmukvV-ZIm6xuyADj( zYS=YB-L+<6!B(k#0M-ah15X*^?fdfmYX+W6^~vOnTMO43PD{JS<$lDb{HL<8V4LLD zfjtIBgGiSl{=mc5AeQQb_2q`G2OI3L48v}G)UdOL@jqs=O-6DX;L_o=RxLw((?hmO zu}UB8F|1)9f~^KqtTC(>XQfC|w+mCi(VN^Kfqe$l;cb9d!B2+x7>?EImU%zN z+YztUiOl;8$8z17`VdvM-yxANz#a#?jQF1}OZ+F7)1~?#E;KHL>j$T*U|2&}6@r~o z*^GB0UalkE40rBv!4<<&8BEALUxLmBRXLmCb!q|QfP_L?W)>UllI#}P6|f2zW=JqS z&=5$nGMKD+wqo53szI0;3ek=<2$O0S8hj_!ZCHN>Ww4%Zh6zl68~FIULKa&zFxOYG zxu>}mcQf4mguWzgvZu4qV7FwqW4+d)>1N1tGLnU^l))x+=J^`*ZcuJ?h8ZS3LHoPK 
z$)3q#gYPB#4c3>utQlJVOI8M3burJkpmV*f8FrG{EY&PD*dx^)pqoL7s_AC9Dseo* zk6qe@ZIH#??*TOKCwt^Kl%_=EYh3YKZBhE)12B22lu-H4Q)L* zAmSH@JTqK(X@*UVG_rCJ42fJN)wfv#2!58xuY9->MB@uH+`x3Ct|LM- z{KkiSA((e%m|@^x&5KPVQ-&yc4t|l?A*}sCS#z5$LoT9TeMc_U=V(HgXXx*kr+QTx z;^&C#%(jDr68Z!03cQ*h$Ph2faHb|@qz_Sv7O zBoO>Ak&}3b;?;0MhWKawZ8#B{_Y~gQc=^T6lnis9$6brk{zC_!#=RQ1X4W#qYYeb^ z5t{c5-ktu$=56zwJMm#1dlv8Sc-4E!5dVX0G29K!dk$}GmYeL#kcjs{8h55XMOAT* zNaatsB5;~?$q?VcIT!*tR(XET3jrVgd2i;TvrY9K?r zE>})9_*5V4N4d0tI~{@%Xoe935?mHRJ2)vGIcyF2>WAV_uNq9fNOBrRD zw;h*7oefUt*f`!jc!?Y7GQ?YOL#e8vd2`~8rMqs&yud7tdG~T+)s<5^R@;|4I&ZoR@z?vAH#Dz;@zZ$KAkABx6Vo7%>jMK0 z=KrUqk_T=uoT82l@tmm8G(z*{#XA?TR_x5%5HDlC;EayF4DSZKsw*9WN0;^yK)bMtd_k)+wKqM5fB-Wr_voDMFC_Zqxv zV=}~da^mYa*1VVFy&kV-CFY$%b)er5{?xIB@ZK_;=f4_K%MuS<9S+TX1@4XDpb?01^ zP;e2f=rvLL|6RB*rO{o_7c`aTxt^f8q>M$8tE@kv{<9q+hTP zSn1BE^&1Z<%LLe@NpFhB@MJ>fBrV_B?MQ8Nub#0cY|$hbD7_qbv;S1vckSJGR}Dx4 zIFF3LR&0M#dig{8S_#{Jq@WyR|9_QI9**c9uM%K`RwI>@9CP^oSs~seU+aF&W>0mE zWY`4+TH+EMS3=|>J;CCX(&!WV?lgazz!@cL2~RzrMLn)7b5!t(v~u&Iyr)XBl%k_* zKPD}%-^7I3e$MJ1YhTV`O?n99Cv*hlfvy9f3-8Qx1f`#Mch@Jt)|&OEL|#1gAXQ`y z*t($fyIv|P{xo120#))do&P2<-S}V0zS4@7^^{Y8tW1T*<>SMxU{ZbQY{b&utnFJ_ z2n&WvE?Af|wulTy@s# zgry;O9jTKu>~cOl?y)xPOA)poH7&?Fwc)6h(~a%}ru%x7{q0IC^-idI@t3TBqLEat zSak9ge0%{~1=7nG%s6&pxGRopowVmg@W!`^S){^d4cn)BFg3j(PX z<--Dh){+_Stk<)vqk>`7f-p#kyPD6-{8`8PvtC2)YG!f4C}Aq755QF5ool#I&HJRzmJ*DUf--k0fHK{N zulFi_C2+eJ4JeD@N&x$xIY!4_6G$UfC>t+{au6jTQ~{52-AZ>gbU$@vKzR%meI`b9 zbeY)5V>x=^V1g8Mm#YFO)LqKjtJ2R%>C!?2Dq={~@)G6SVuB335C*Ca5V9!sLQInimDo4HR?dB zzl7Z#q4vGX=AahTM!N5*)p*oS7EBhPnN&*vT}Z}_t{0h1Ziw!o0o8Hb;d9gAUYDD9 zOWPF(X(*Jq7NEV)O$jSA+Fz+o{#SKWgN50bEAItUMIT9Sm5!JiiQUUpO%b4m@dSXH zbzT6a2LNiLU_dPlg8)?bTk~8_X)yqenWjoXF>D+_Lc#DNQA_FXQ(P4>LQQ_QmsrNS zmL%@<64NB1S>s{=g~{4vLg~vWXyj#p0^kM!N#9t-eM5gKKs_??suY+rCL-G)6yG<@ zk68NIINPom8c`R=UJoqwz;p)$(%zaz)HdYr3d zMBi!(W{l2NpoGR>^Rr`oJ0}6PjDX+5bo=g`2ke>PO=3@>D&mhzr=Xt+X^>Nn_*{ z8qf&CO#q6}{A!b>yC+BVKrTp^LJE$%094v#bGp*Lo|N^z&;YHGbO%tZT1D@!^zH=J 
zvj!5gCD9nm%Mc2UyC9Sf0ni;apb3U_0Of9~&*e$Ji%2emIZ|kfV+nwAdHN-n%QTnE zP=I}R@>2jcvc&}5gx-{(!&0=c;QX4fTlw$>n40Ud4X)XqCIBeth*~8osvxa5MVn*Y z06zCIDP{}*YVq;AKN2|%?^L>AO^^?p1= z3KwKVYlz6}F4UZ;;bLc<4K9+rA&EBla{{QoFCvmCtu)=uTnuQ7p^yg(()SAZmzo{#?IzMr{)UY_TxX9ehrgZa|B6QYvO zZed!XBIK-#QGLxXG~g}_*8a*^kd>p>Tsj>djN z!hrTTQUDYT&zy2WabadO_9zMqq;L<8I{{RqI@9kf{mBi!Ljx4^+XJYxs7NzanrOSx z^_y=?q9c}05XxTTZ7!S5={IjA69(Li<6(b}Z~OrGoErcK??|B&z@z>gPtekJj)I%) z6rll~G4ujZ5c#+h>pyxE;rrs(a~%sMaUY+bgHTC4PeoH&gGQ!FsR8$6cnLtI{0x~; zIyj$ghzS-+p$mpr08~XE;od8)UdsLMzX1?$(fQsrz23w?w$mJgy5gPFXo*eU>M02zy zu@MQ@YlHVB(Gx>%0M)ChG+Lz#8G}$%p#i-xCeBo>0h(_(byM}lv73~G=yFy(LNvzK{c9>(&m)buW74Pp#jfexYuh9AKOy7 zMF*~Mqap+P;TR5}OJ8uo)skJ*b`8DXm%{Tn#sjF_^A2(4f8cD>hH zNe#d-2SDAxH%KYH_GN&}*$@e(5hT=t z3`phkhX9&wTv5mU9LFLa*lYPdp{L=^+5Ek{a*|hD-o8@Pf15{~;PQc5{jkr7#%BC2u&O|M%2? zkxLvA8ZiVz_V1+L!mZ>vT=0=3h60pFLieLL!wsc-sA!r97%&V&B>+{lE53EZm-0-| zxQw#`Y|qWdl1dB*spiq5p+gtFYnFwdvfy#n$W-#TQKBOu zZt&!(y^d_!+sIV{Y+=mEKsWU!P*s@S_q$-wx0@3+R4WaeBsvP>7D$c#^6hma)cFh! 
z`f||*jK*=Nm-^}pCpDQXp=q2IV4D*@k<=JS|BEPm|CwQ;!gWc;KTo4?auqotC) zkCQ3w>`FNJR1)JbJPM#nIF|~qbQ}P6Ei_;}hNpaPUS&L|3oJ!X$LursObQb)Jo^UY zpDTGYuf9BdU7M8U$Z?bz9gi@G_V=aIRr4$WO>1n0{RKL)GX&5#E*y4ek82bsl+})~U0<5loDXAHFzJk=Ps1tP+B=O08 zuIDjgCY~LZFNO;DxQg4Nt6kZj0-^-ZV2iY7LGFRnrTtmn-TzS->3Uir^0V|o5lAkx1|&3{Y2k_bqaD$7f|K}8 z5{f!)JaEZj2i(go=dn^~z!d;@WhoqVKx=L&K(Jd1>fkzhV3e;kt-M0Q2!+1;Ju%xa zqm`rLOX7P;Xg>d-2hPoN=eT6O%Y*?M;Pvppr;{D9p9_`39w`(B=;?txJ`T}ukfQ3N&!sr zK(U0cRtKn7k}#q)#57O5;Lq`~J4f)7B+3BH^1v5UU1@(wy_13gWdY`T;1gf(KJ>R^ zpA^ag%m+xQMiw0AB;FtCB#bDJW1*L5?KAP7&%}O7Q~+28pm_1c7&q}dOPY^pDWLFb zoNC7n^Shb!=w~fH2M2VtqUFXndH$&ekcXkR(psci+ciSrat9J>r%#r0gM@|s^k|FU znURE|EN+;6AGm{PtP_#S(pmzKL!wjY0lWGj`ixlGO|1LnNk#(x%y7-mN zoP-eymjxN-=+LP{oy1?moWwy%=<1UJ6pyyED6I5c7aPQf1}Iuq1yK5J3D;8{<`m3b zgJ0GESh%bWA*meW-9pUIsV+$Ts&nYVZUj(V?M`p4v<0Jk4Xl3ySXV;D{Pnw>U|SKbv#u|~X};q@l3?|BtinG|cnt6zeD-sY9T>xxxLvF5z`^IFVnKd&OI zlVYuSrSe+N>maXUYm#DZdA-7GC9mIkCBL5(yNlN_Uhnfd%B$=LNwE&R0$%HRo#0h* zZBnc=ud%#7=9M_ZKULQy#U9`_k=LiZ{^C_*eNwDDuPMAjUXcw+vD&dk8|ubsT|eoP7QdWP4VyuRo4|LA%XxT?zSef%5*!+`_^ zHFH4as`xZ_xu0jXZfsmJ?ov;JM4GwvpJ-%K?K%d{>_0>4rzj9u0;eO zb0HOwW=Q{ahyWx4sf4sZ2G$}1kUU5gq!p6A9ua`#L#iQdkem+@0Z0KP25Eogrt9t2zdB!4x}8?1j+mY5rE8vR6v>`{p%3{NCZ*|X@Lyf zfCxbHAXSi7NcO)F0Z2Zi8qx;I`8Of}DS*Tv?U0cThybJz;?>~44#=2|hybJrQVZ#X zjQa<&Y*wW)mU+nG30aG(-AtMg$-cNF}5NGH?qb0LgK*szB5r7myY9XDF@$HBJq!>~M3H*WxKxRNnAaO|YuZRF-CL{`}hot_72tZ~* zN+Au9o*jq)WHzJ>(%6CVPyZbefXsoELz*C&|3w5Kb0HOwW=Q{ChyWx4sf4sZ26iF> zkUU5gq!p6A8xeryL#iQdkeojd0Z0KP25E3tKuRESNOG5yq@y7-AyG&@BsDoD>3GO2NGYTN(laF`DHk#u zQU+IFGK(`15yHsLz2@G0mw{96jBdK%|HYovmm9A21w7|hyY|Zq^x&}m(&O) zy$>P)nFA?@G(j>m5dp|tNCl)B(mx9kfJ7jbkQT_mzK8%M4^jnbg=F_b1R(j4YDgO- zr#~V9DS*TvbEYE-{XK-?fRv=mAcc?`NC#xh07L;&1gV8|LdGA6C_suKb&$Y7L;*4b zQUZxXk_RCQkeQGuq#lww7*T-Cf|NoUAU*N5;JJ|59{wwXG(ys|5e3K`NI9elk~tJn zfXszdK$;=_han1(2&59y0vR|QQGnz@svxb9>>NY^k`Jkdv_W!0hytVl5`%c{_;2Jv zhya8kDFoI)Iv`_4APSHoNG+rjGJYhY04avlK?0)?1;`9Y2_z0l9*rnKW{8s>pLE0fBa}fndA*2S<0T~lU6d*;AT1Y2k{7Hxcq!>~M3H$|7 
zfXslDK;n?(Nr(buCL{`}hoqj22tZ~*N>9f4H$drm3L*fR4Jm^(Left~1R!%D<&Y*w z=4mdZrvc|eDj>~j>wh{T0f|5=AuW)BXCM-gJV+I!6_R}>A_2*VRG;Y~6i{-`!i0bn zKw^+~$jGx12}mKN2GRi;a}FW_DT35OIw9lFMI<1_kUB`%NLP!my z12X1PL;_L-sfBbx#$Sd=K#C!Ckig}L1Y`!J1QLfNUx7$KW~M3EYSXKxRNnAaO|YO^5(wCM4?Nzj{dOOhf=O3sMScfb_f>5rE8wltCIH>9-&P zkU5ZYNE0OURzv_Y7g7OfhV-9>2tXo`N=OT2;BANiBo9&rX@z*%w<7|Od`LB<4U%&Q zA^<6X#31dEk+Ts2NFk&K(g7KBCn5kTg49AfA>;2t1R%wbI!NGdL;x}aQUZxXlII`- zkePEZ{!u8ndrqncb_pg2Q-XY1-u)a4IOGt#cHNr)D85&?3*Lke7tVJoUed30D&8;; zH;9@n5T3tKG-P_oX~^@L(~zZ|n%89*J&PY^ zNyAd%TCFO4n^cAK{g1FH7GFlKOE>1KQx&<|M9pFUiwfzGlZyUVIhENOMctnckw@vU zKOIUv(R-)jH}h*%#aktN;{PxLYRrn46{s=}c~)LwKb zD566zI#f8-V=L-K@5i=vs0wSzVmkMwHKr=8&7x+qwEQJ>$Yg2ds=^^I>VeGGt}3j> zK7EJKTCFOqO}}MXW=dQXgfUVk6yAR-6b+L)=1l0cD46k)7px9ZFy~@siHm}j z8F|rsM8Pk8#LN;G1s_I4R+x_{_>HlcTD)*vG`zJNDK{rk@Q^fSmbfVRcsufv`G|rK z6k}$IQ%T-~AGp0tH;iMUDEI_8CJF~s6ufH^d4)b6M^rR?9v2gZPNLwYmq>-xCkh^T z#>^5I1y743ubPi2c&-{VOI#F8#>i{tBg#UF2=LVlGBSVJJnxq-qicODggXd^~&|6~4WP-sp8q6jumP-^Ypr|OUOB9clQOjT``NJ-8P*~?G%S|O^46KuoQLLE3 z@x9iQ;k%l?uojBK3PeE+;-YX5i_r&`LQ%NnJu}5cK?5W2nUg5o49yZ3r3-xCHy=?j zBr&tZsq|z=_}Sx&_`wyBwq&PAK7c+;bnpRxR#DCkNkujOyNh%BolukTi|?a1?W0H5 zBzy|@(W7hpaVR3Gc8!19;+!F)JIKjQUP}erY>6n?UCTtlz%@}B>4!^)kM?TTK~LY? 
zuDxw#F!YyH9}oRi=xhwT^rq^J#yhI>1Fo3r{3Ldb>iop?L)C{u|5SDEGxaav0VoUQ zr;V5VEk*Q^&^M|R*WA~t9|HYb)sKYUqWV$Lcc^|m^q-+KQk*9ps&g^?p*lb8OMcP$ z$XjjQH4$Pa)ki?@tNNkP2dX{+`Y_dxhCUKHqrkx(r#c7zDAoDt{0XY_6Yfc>^TX^j zq2mIGmn9(QE5T2oFHxOa{4~`^LBC%0vCwZ(eH`>VRX+^+T-6VUK2P-{pg#niL&VQn z7eVJd!J7eB1)k6ZUjitF&Pl^b__FGpR8`Q~D}K89f$Dt0;uF>Re!xcPoMe3aX`AZD z;=ugX$_Ky685Q(WUv`t5L(@ZbHe!F(IVtVW$}?)2YUljyr#jyv7yzAZ;xGdxm4d!ovW}KIwQi#U86cHtW%v6x*j@*kyB=q z>YUHtL1zSWAZ^eyIDfdpb|HbQKY_vS(y1jart}~&tKalRTo$hhN>^@fsG`M>nban{C z)_tMs2tfBGs-uD3pHdwS>|Uli8rA(()lqr(x1lot2pH}=sp2#Z>Kw`+ROdq3sX7jfCDUM|5F2eMRWBo0)aBREubuKp3KbM=i?ovZb5)j2(mQJqs_qUv0T zCquWnd)Dh%987Rt{#6OCtf{IqQqxuE@}8kO7tO7zbMD>+ojqhE|EW47{ebFR#Sg2_ ztzn7kAuf((NU*n@?9Z#tkX5M8)%q567RW8&1J#E^{}?)lh^uuYbVer|@}244w-g@K<=qJo2UI@;vgTL&P8)6Y7T=XE7wxF3chLYj2FTA*OD)Y$uDm z8!tbyJBG~DD9khXWmV=eWS+t{GKQEU2y3Ts(>`;YA^Hcd z;y=tcKkhg~$2AN0JKkVpnSz{ujx26x#=Dg?bX=Wq+iP7$L&rsaVZKe8&(Lv|!o8^z z!_aXpT$?!VyrW}hij0D2b4wm*beyNRwA?v9Z<}SQuNXC#%{_iZU{@ zMCL*Gy&f)U!|Xs5W8C)pBD{u`QH3UO6Eei}o6`TXxRr0D;Si3A*r%X7B{ciWilkwC`{;C#~6|++|4|z zwmHU-OpP!)NdF&w$6}Cp!^RO_blEKBx6XXNth!qajVdfOwk_Z8KaCL zn@}sv1iS~&t{UPOTCtu;WenF$h8Tv5$YUU~lrY3FGz;++_8ZP!LlDLHB1iA0ovzJ> ztfWqu24xJT{>Xi-rc>9mVJ;p#ljxm;SMyknA!l!kFb`r(ITH=Zl+0tEtFWbzF(eZg zrUza)CgYInA5>l*Gi_XpXq2>GzCK|>`A7PBFq9!LyXW(GKD_#jDOut2tzV8o=`{N_X5~2LsA{We2T_#*bT{)7fW&FZnGS(NTx}c zS!f(5ks+C)rOfl8&TvCnf3-q=qJ1@FrcPlF*S;E(sd${l^}xZJQ{0eDvoMRbG($4Q zPcY978XrZs{_2E!;2~F>Au~lvnCW_*dWK{wg;}ZbF(lI>On;4wA(@gVS=!YaAEL)V zk%|j-o(9N}Ox`kPx=DLvNTy1d-*q_~l4%v@VluIG9**<9P{+l zy9z@xwZe4Q4aJa5r!Z|gi40}^RXoq)&eTvZRE$MWbgZOX@Y|xl0$QA(IyX^Q%ffET2#dC z#n3gcQ*s@K{g+KQWUh)zBE{k+{pdLbADM)U-ypT;YnR86 zOr0=`);q?KOyo_rZaIGWki{9|879s-mBP*35#ExB?~|^*vIkJXONk-pd zxgX%^3rRyI>xFzyA3GT;ng2F(o~p$fDp^g^%Nnlb8Yl^QBp`VPyz%6Fup zk_{v?9>Mm^<{Dz`3#v){u-L`bA^Hy{My7WoHcgI(AqTjfOjff_3PY`_>|K&~>%1^j zvQfxWHK(DHg{zpe-+GKc#~6yBvNdEgUvG5HF+|faIXZ;90Qc(bouQi?<*S)5SNBju z$2AG}C8EME8sclfS!hv=8MAc88M=(M!rg-Xff)@Q*C|}Zla4b?4P>LtiuYLNWqR*x 
z=<+oSw-EKo{!fLyab)rP%or|og&MjkUPmtLGo5XQ$jW&d`GDjtI@=7DtR&fc0!}@A zm}4kkL(n4BDLVRw9Q~3SmbhNW&`?W^3wg5+ouQI>YnZc4&r60%RtedM#d7_LLJB`z zi^6_`Wx=UosAP04%Y79AAZe&%y^!Z403;2S%wLC`8E5FkG33`UR132hzoN-(h9KI6 z_-U;h2}2O2wJht3ZLT6i5F1IP>pTi5!d|eR85ZNb!bxNZ!ixzrPN#_>n06uV)2U$y zqU=MK^@bie4M8-L=-q~xbBGN&j)fnA$vRet%8-HKpCPIdc9xEnAz6?eLjFx-YN%v+ z9m{=7hssdNCLv37s0@`X`j|NnF7{jq57zRPttFfB0+s+r#1I|n6yoalT^tQTRD8l5 zyEKf3Aex2Pe1$8@5Jd5(%;8Ob)|F)lrcRg*`k{a!h)A4S?$#kO1W_r(+eNM@Ll7-O zJgf%@Ll7mOp{U;P+>icqu->`fAqOi?D(g@@z2m)^AtQscQ{Lw!pTb<_b2~$l*mA0b z%=y^mG$e_ftwLU*ISsk~m^JzZ%N?bxp_27N?!r-yEizOxzn(dp>l|sQWVMjRN*XHJ z2GZ-1HBMPWvS@GV29|r_d#+qVB^!i%A0!W?hDsLvi#aEtFKn(MNpv(O>s` z8fv-a8(Hq5TB)IuO+x;PkG(im43#YUk~#Yf#`AA}_Sy$cBk~6=sugyow%1U}P9c|T ziwu>l_=@EoucRSK1h84iPc*uMGXES~+{mnxb=VBG+&Uo}P%a1DP|3(9=3J)b8Y)>S zV>>Va~hIF&iu{HIY~)FIsbyK7B*Fy1b0Qv z+9u>r8bL!POSiDx)k+#F*&yVFN*a>5Ft^AWGAr6r zCu9}IiliY)lpFb;Ik%v9Bn_3U6!HS}f~296EuOFk;L1tXP}!0n7@CSU7aBv>7$?*J zVVoKm8pA;F-eG~JF=%dHEBWQS-KV~W;Gel$_$uL_-HH1T763&sH+bJOw~0fmILybl zRdQ4e6MS?BE9g1MeXW#XZtwx-&%s(0z79O|8z%VtADO?o71w_@pt;rI(qL**{xBj;e4aH#v073`1i zA?Be#>tj3wT9CY?yE;=^%dw#+Y-!kPUEiBrg0b z8Ue#13}HaVIPZ7ne?%i-$nqUu<%z@3XaGZGm~dzn{sW9LBVd?lK=i+?;Bt+CVWI)` z!cWi$7$(a1@^`U-$29_m35ROozefdZfMKG7HsOcp`&EXC{H2{N{~!!8^BeBvy>Np# zY{j>@GQ@_79v19o0S{`34HFHB2|ry!Y`C|AaJ%p|8ev1@@%eYzAFN;t_G<0}hTJIJ zI&T#ISe*lg3BGU-^Doy18WyDol3o59;qTW58WuaA_rD$D(4s9gR0r_oks?%3r$b6gz|ip?ccNNxn22S@Auvqzuv7SdXb%nZ_V%#iK2~s%_RuiV zfM(&})(#p9@5xqLJdXv8-sy(GFtJzH3ICQxz>pDe%P^A1{7caQPBKGQ;P^`6JJA60 zhU7ix&>{}?SZ0iXVWI&g_p^c-8Ue#Z1LDG;q!BPoG$8K*=3lN6Fa(eBmk3me!|$ko z4KPep&?@`{j4^q`LhqJLR<|QLPPRymbVH2$XD)SWC(t^<4fnW{0;bOX^Sr$d4dU>fUd;>>6%;(g z0vb^P8(_!^TnA#pPut~&&@gYjE#r3KAJF`U9vp(Mg=Gs^!Rhb10u0H!3L1s~WTb0= zVS+C#VE+4tyZnZ1fXiPa{6TtPQN;Y)dm`x&hevgm8z$yJ`9fB3lg@I(#27aT|B23W z!vtUSF!N8sQ#hW89Af;7&$AZRibJEGhz!9eyQ@{F@NIe`G9>R#G!>7qf^0nz87BB< z;h)nX7UcX(I20GMfE2yn8z%TV;ZM@7*pR$yVB}Hee;XBWjuH_E<|CmoU?FWAp8a$;tfaQ{Lc*m4h$h2@)xs!79C?l z@)%?A)xvjbhz%2boA4Vn#DvJ_ 
zKuq|2+?sKg8zyee+J*l_BjA~ma40Kg1x*?O!^FK?qwveL;f4vma4GXYiCa20z!0}? z!`!`LjqqtlN#wnNDuINlA($D&Q(JM1FQK2hRXXSivGxzz!HD_-5g|{lPhm z_3xAb1E4p*#ZR(;Q*{a$Ciptxf7S*VCOQyV#{7BO0K)`dDg1q$Lfn5u0VaCbA`Zz~ zfMKEsB~P(}HQ>2;3=@1@_^)wO%_(5Wn`?bM5M}-(M8IPKREUYY?<#Sas|_$r+M)uyF*9-sL*ZTYciZ~|jp7Wn!0jD8E?15o|uNM9} z+*PvzLtJZgA8QkStmZdN@TJeP{7DyJ|6v7_E^vsAOth{HU6xMY~vpvs?T1;y54#l!~GB>WRPriO{J zD=K6D<78|w|DA$y+xzIHRvezt!8J?_UZ?QcI=F@jzTyQ`kYyn;+&>Wi5U++blg#Y$ zjJxCYOaXfaJ`{VAlseLI z|3E|Sez^Z@5w_-W#~PBwS8J4%v)uB^R!J2x^!TC~g3m_mVXCVVCnw zMdqA{Ps)TGJ0I(hoq-}q7An z8unyPgg*Zjuz#Mi-2cE-3-c77baJvAf@l+>zY>Oc0$N(Zvc};>9_BE_&18cRAM9|1 zAs!kRyviK2lu+~ncxW6GrX3^8vJA8FoSj74Q<}wLxa>8S^V|XM@?wa#Hj+qtEyJxy zhv9Y__luxh>1g5WNDe<9FSBvg9$)SsFgSFeS3_&s7CdX@l64qvq_KMh2LAAy06N6Y z=Ci}_(n`1mvF^*MWWJ?uqpndz*4#k&V{>YvGJF)`$@&%j*9V9CrWd_IYBKh0Qj@p) z8_~4%TCv`b#Z2q{Ups53Sm$De(mMAuf7}rGR=kN4hSeinwASzNUmg+~klrlT|Kf85 zTK~JvS&LWF_XfPcYIqT=^oV8M{*Wn`#mT=wk&e|f@ zGQ5&PYuS^|TJjc4n5W&I*XBpE(XzN$H4{2x_(Y(fY(avkcm1^4bg&Z6#;zV2i(&Bh8j839EMlX7{qj|7xsO`nyka{ z&&K9gGw;z_g+qK72#oth_{*nmAu&DVCavJ6?fxr6L;buqv98d94Ur?8*>d?GS7GV9 ztl?vfD(iO`uBLInT)NPwL43|w?|dAF+r%egJ_V~-P6e(&ET>}i2rA)HS`!7um>hnI zwp7p`I5^bbYnR+b+E|C-g4OWW#gHV6p^TQaB5kZgeDz4e&8?Y6aa*QsaTspj$IX>i z7-MOV;Lt25A}VO|^R)KN4NJ%HUl1El4Q&Gs)nPCM$q(D#kBrAM6_1750mFSg%HLxd zhiecG>5&^a0fk^sYNE@K8=iJCGz`p2g-c+I+_VR<=!@Q`TQyz^WvHtc`KyOxylZJ4 zkg>p74fkz9r{pWW(s>v*{oE@UOAR~_IWG|D!s07FU_n#x;yH^q+_#`+dJMThw@8Qn z1v#NUXBH2N;LpH)#xFuGdi+vQ0l;0s0j~Lw&u-8fJP5-zU%I>o8nQ8DJ`Tf?we*RgY?;j^Os0MHcefpX^>@4ufbW#46sKHsii^`y zv?&E9hv6S-45A}>>sZ8N@4CAYhv8>vBwHoyEx1ha`MSe!oJQ@Bo29Md_Gydf{61|V z;+tRo*Dp#-+C})oTpSOF;gvN0&n&H{ZOC=nc|+vMRuKs2uV;BNypqeV#V$wvDY(Sb z`lonTi(e})TfF6R7_R?6Zf)Y$G}aa8F#Pa`C@qLslv2rBsF!TRs+-a#qs|5)y^ZKO zyJlGZ!;s1N!kmJSn6(h^(vmc+-sAoC@<2EyY*8V$Aa z9YWrLCXh7L#+QG>oNupkq-Tn@zKLw#BbGVV&{|*gDf1TV2jqs@&RQWqM#wm{h8pco zl4-l~5Q5|BFuW{Y#J>uJJ+2~7`foVnk#-pVoJJl)nuTrG%nrj3d`1>|i>ah_b&nSU z4#RI7TPN%q%uB8Zhv6TEMadEGb7oG)V`82khb|(9pP`Xo##brq2(UfCP6FEl7fl-X 
z3kKLy?3*p(bB+4^-T5r|f+e7FB~;Q52g}|1Jh1pej%pfFLIAHLu!y)g?I?8BbazGk zEJfsEQP#7F0b0ZaS41g|C?eN2qDp)oLHxLjISg;4F&M$|TG-o^br@c_fvg8QN-eD# zA!3&thTjt!Y`w6(P(33w2Gt`%zZjeUFUgDf%!6dLVE6?ZgJ^oSuuJ6HhVOuI*SJl_ zwn6cFq)oxJXJ&`tNB_;tsJN6$+5x(H9fsE!+aT->+9!wM-;FJ3VBW)(br^n$MwT2C z_IJd)J8uX2x;||Y8e`Tj^mNVaFuZu9W-gml=Nv~sf42>l-BIaiRw@OQ>`2>ZGYlf&@Rue4d^RMP%4#4RvAs(&gh z?opbk^!BkfIf&(oc=XUEyz47JI3|>NW>F(uwqReN%WsNs3BN)nZ9Hz619^Pa0pC!A zkI=-u8}8ZZo~cOpZyTjRuT!!u!0%SlV~Jq+=}jn$ORIuPS~K41A-hvByusLJVO#Z{ z`B%a49vb(@6PHNdy{;X}Si;3$!`k@=#<3yP)5T+Z+_}Zd+$^K@o}NSeHxt=zYWEBy-)O?92Yw9 z*urgaPwS&El#LJ!uMlGr3XNa`(kogGjiF__LS9(zXO9mJ@;YekyA!_<%?|#q2vy#G zTG|4(OBWOoE~grrir2i^%WD~|8;z5$Z?&t_8qp-HEXOQi)S#O{2r`) zJvAz;U}8kMcJa-}{PPYC4M}en>)&;z-CrQqX~jRlw_lgdt^;X`C^RAqshYHGKjL>H z<}urUy5e5HHuH{MHI}AU(lck`zD3wvC<5M?Y3WKNngAWf4^Xh3$ zn}oXp>_7pB;ol_>TA1HPHV==(Sfayl8I3q7Wy6G}RV_Z(zvz~S!|>NM1~CiTsATp< zAh~#kDMCe%i+(CP$@Q@GCwLwB5aLO%C5p0okd|s{>B19GgZPfZw4iU=7C(4IsE^kv zG%76k8GdO?+uRl3VYrOOeRd7;nXWz#!(Y+ZEjfZh5q*@@E?(<5xnqgL@Z$eyGs>u> z?Z(B2+xTTp39mQ0QRoGzf*YK}@NQ!Z+nKq$vJS(q(#RgxP#M*vBh@s{-+4r6=-3X) zGiS6L3y0yye_Rwa~;Oir&VP>Rc2Xb+i4ez8^(!DrwcTyYFBzL)ZsY{XmxygZS zPBFCVncv{lCCD#~ub?{U8Jv^2Ev;1C>p#?G_?Kizl5d!&I_j6W%djWzUwTw%XrJN^ zT9@KRjKw@l^qVGNm#!0H2M$>zl2((*I65>qJ@PwqOpQ9?nyCLH%1N)Jbo9kh z(ue5(bQGGCw~NG?7!zjnzV$avKy#|Zx(XAY)^!{F{f~yVmDWKIV+*I%kNFdi4rLv< zs1w#M5~C>9L4&X((BYtG#KBveaHyx+e`&MZ5)9$XjU99RzvOvCn7^APormYM+)OW3 zq~kRHhaU{*D|DiFwRr#esOh|>;g(M{xPNQzZB$0a^q^L^)6Y2uv$OOMS`)`ie+sN# z1Fa+eKMs}rtHez~HiZB2hqv+tdsxW-IbH^WfoZ{DgkN>XolAb?8{lK~AJL{qxLu04 zaFE~eAI&S+r|I+Bi%BuR808)k9%2UF`AP&n)=~m(bGn;_Q#y$mX)*Gzm3+y zd*M4MaWwUxJ~otfz)D&f;~Jq8XHx%D(mxp8L1jc&?LbDUzx!CM?ehC*O&mh~F~^05 z^l1|7pN^dVxZ^?t(~IWOJ#ohLAA);&tymKWO8+fdJH?tfKKd=RR^*|Ck*n~PVVpPX z|LylX9$jmubwuJ4?w@)*e2ed=HF3fA@1nJi){$ZTJ?}HW@RzfM$OE(ny(j|h|$?%8CnF!(voc|E>C$8cC6RfM6 z))9~3Sj2A@dg>Ct?nFepO|0+hb@-ES{av({E}(DXD(#P+2y25_6IW&b5?K2b6wo(u z{l%pAKRpqXDMn)CZ5T+-@&~{0S58DA+G!oN2_I$g8no#$f7`^+&|YN=>6_ozJs`>V 
z`{km=jWizA`$4zXdOzr&mWvIu@L^a-?8N55E9uUi{$06fMGdVZ`{DCVRwn2lm>U|@ z`*(5w#MuKxvG*x|gc@zy-nnaCGYs#;v)D>9ef~UsP*ng6p+z#Cf@ZSqh5;Nr zN{X0i6WS_&T9*o4qz!T5{)Q&gWGD+MZ!xo7^`(oTq106+)I`0m8Ok}hRj5;NIOes> zP|kYMB_2yWORsB&0c_CW`X!`>E_K-qc|8l~`=B!Oak*p@4WXjb)xz~diR28SqR2Mk z9;UwJ<=MoJl0nD1lWEv@d-~C7!2jw9zo!0 zgdVRn(WBp9zJq+9x%ixqWxQp?b8%s14s$v;S{8mde6slRMY@BGB%TTD|r~)@B*P8#6LjR!ID5R&(lwZps>FiwFKgHF?j_<3gPfhC{`}HKa;B z-b;057_Q&;>thct^bYJ*G98b(>C)#@%x|{UP&yiYmefhOX(eSS;i(tuQA|<>+)$|e zQnsoTYl689_YF@qJ+i$2yK~7BZxgoU0#~#liVEansV-;X&tZGy8aLdx@CNZ%fO{xj zj11R5|7%)gX^&7LdR*`v%b4R~wy=;niV~IiE+{c^$-_>_iD8IpkaCf27mp}5B6=9^ z+mN#7Sr2?sYQ_H92V!p!JDy+QcS<{{XY7ZI3h%G8715fC6(qhv z4iX)TD5{x6TJ523({vavE~obn)XQ^1CW;}Vm^vZWAUh4MiYO-X5^{L`o?YPnVEXHd z+(%`WRg&-X6Ba*Dyq${O*qIiga&Rw9YMf#ksgjqOYp32<{Z}z98&q7Vd$ze;_Y30s zBiZs^VYXw{{p5YKRgvm<^y4nuUlh}_d5XbeXqBA%Ya0$w+@EcURch33<2K-A!LD#f!uhYV!eLs`SjE9|gM|vKN%yJL9IN-u*(NzBBE~H248^@UOJ8Ho zw*B~5<55>Ckk{XCymB=NcQ|G+zf=88#crrHT<|)wWvs)N&n_FraBUUhQ9RHjVF;p~ zMA~(TBFEBUxU7=J6kto?o}lOj3b6vo^u7!u%&ow%0uR0m-(W#EArCv`FkC}pM&1VJ zZWzH6cp?6YU6kb*MzOHoWR6{WU@@!+gqu8J7U43&E*PRsMJt)*_h%hp2%=Vqx!CWS z!w^KL5EIc05{4ius#w(A=QM{Rh~_F@e=^2u7DF(_Z!yco>S_q0PKb!M*APVHZRTju z_8Nky6ykmiD4S&nqJ@N)ag#EJU`pO$Src*PQHDb0 zuVStjaiL@`!(bs^QmH1Dwi-JYDTm=U8ojiPuC7fE!=d7wQ2tog1Md{zTGu7X7Zs>qs%|gtK;=|8cqN_CGH7 z%V{mCWeMMIaSQd^Eq(*6$H&FmAKMKVkRjeZ4`dDq&c81;&?A%@!N&(lw_zQuM+u=J z=retXeB_Uxj9yfU^;K*?w7weiFQ>JY)||DQ-HQ)vH~SAw4h_5^`XNiGhLycAtjNZL zkjGC>da`@h!J*W7xu7yIrRwRNvq2kdh>w~17krXVN)2=$%BDr}rzpbtAF<%>+KuiT z{9fmW1|3;VYsO|wN|`T;yw4$#oXx*t5pJWN^8wZ)WB5svKmB|}tF(>zpxgnu5gZ}=V@wM(($gPBWksLSIYU3O zJxUsq?LJvp@O* zd^XxlYue9xwsjcJ`vOL7d@=R39qQmPT(yrw9reB~>R`w)Fb=Kzct+~!`K@{iju1cm zHw@c8X|1)Adg#Y7?yH7&kMVb)vAtTvI=t9f5AzSW5FbdCY@qA&2m$Abpg-%cp#gp3 zBytW#b)4IR{{B-#2V9@`FA#lK>3m*~s}<)pg)J^6tIenq&#Q3xBD)QjuYElm_VH|` zp7UOTt4z>87yDXz^xq`5VoTt~G_}spz6gt_p4Lz{21D+ZH~VK@gvp)XKx@j^&YJSI z|2J5Vt){i_{yKdO*;+QYk!*5Q&f0B~zgm}B^=LoNQcE|&Bj@aI+(vWuH~wz6ph2t; 
zVhiMwdGK<7)Wz7I3%;c93~cahtsuKGpqJPi&)$le_&sl*ziOXii27lZop?>&7L0J8TjieT$LQ)GqA~i6* zMy#)*D;(=rzwy6eh9EO^2o=-|OYl8cRuZZ5O)Tqa&Goe88kpWB)_H3E(OG*HeNEr@ zuv2jV`q=M2H8kjiS}|_Mhqv5D454z~%1;pgXG_C7#W#DXTkJr8{3W5jS;=fj#WyHC z^s0`}t8RSyG>i2)wF>%e7vP=y;wEv&%w}6&J=oul#@tvZ)_N@N{b4oy|Jk}Cn^{D* zemrJ4J`jlQ92|^99#6jKn2YDVmJx_Qfvu;K&N;jAThr|3u95z%OA+%HDPlJw!l}Bu zuU~R0CRNE67Q76N<{&Q{?XQQmPh6~D;6q2YZ41$V>eA4F38(K^h4L~Yd`eozOiJ6|qysH69rF@bJOne5g7X zZb!*|_S5GyhC#lWP$Sf_xb>CC*;G8Uix`klfZz0qYfL`FObu4rYU z!A~7$h>3viH4E3!>Nvw}+0BY~FyE@Jj$7ra0x}n803koeyKwATwKox?mgw$E(g=V4~_=uqQ#sSjx{Ssv ze1yJGNv#9x{)BGpFa9}K;n&p)f29M06cyz>Of^vun3yOiEG`O$poxn9dF)Q=2e75Q z;_`>DvJ)8<3}*BC>~FjUhZEVie1CW6T0749-KJI6#2&!w8h$~w`Vl1^Ij%(IG$ z6an8hu_6!!yGeJ)kWxuuCRm#!!MagG@gHQ^6)=^Opc_iLpmi^Wm+15Nm>&``NwCit zh(;#5u>{5$`+0}3j1>O9`jW^JycO7`D>$^HCF0jz6M7>ldY?b<+R%kZ@462x|8924 zJSeG*w3|qkOn2j7k>yVg#*_yAL(QG@{Gr!{`u3^LOW5MnM+W`wSA|A-Rre#oKdf9U z3PRjqN(+_!g2B}E>|D&o_yb6$xSO1C%Y)2@>tAg?6})^`HlGTjp8pUPHk8VAb`I}y z$PXLMpVJ*+K=tFjGxi#sa_0Y}PyC-W=MC31&j%Qp`~|2f%}VE>jKHj?k!!O?6a=P` ziv4l<0_a(1X#Ig9$ipB0z1!n4@5lzvMIlr|95Md58n6PO3`LrMCpZUk7UXP{PiL7r zgE6zDh@mKJAqhs(F)^Nq{kV-<7AFXoFMpuQ4>rKUhnW|%Axb5Ue`}L} zpOb$Ea~Ni_u-Bu9qRhZ1LnY%}$?pD6D%B&<8WP=(QvwS*{}Gnl1(VZY&7#zZ&9oH8gamfgVZYF?qz|7OS(giiM{=^NBplTzn(Q zEJd(f(xW@OWu|gEg&CH^rnn;{cLB0KezkyhOMi!N0opfxEVzlo-HjOLb?2jg`3<2X zy)|&-T)Yx;aHrF!LqB$j)31mA>~ZenERN^Ncp}7N8O#uTs853&J=OW#2)$n~XTJmb zvge(CH}r?`1f5a21oAmHsYo1%b2f?FAQ3c(6N__y^9AmTcpmh>=<9})p*L-J zKIcQP|Iz7FpwGeG1M^-8`F6R}FNS_5UR|O6GDzh{r_Y9dWeA@mkoX%UnB#88E{DGR zU8i3Ged`c+&btcwMM0-u2fYBj;hLHOIc~DkZ-71~+vzt!ze>xQ3BAh^uFPAYPsO+E zFz+lbG(XFYB)5aL_ze|KldB=W9^>q{LT?`Cj_PMaA3Mn1BmEV6n%Xabek~fu)qfGB z3B9L&Ddc5zfciC%vqMgw1AW2~!K8C|8JB(MC3kPgEv6k6(|#>v&PVQ!?q=xq-#Ptu z=%?d0gH!EJNEiiiCf^nC@4Y#6fFGF|N?CdO%+UG4Uipt>zRE(uC8qds|HYf}YIC(1 zxujrhV}h6C$P-YqXtf_sYyM!$1V7++i(XHdKD{L27P}n z9E$v4lYXDpq{u{Eny6u04u40bN83d6!nNVSr(o(H*`qe};bmI7?3sui?LDf&>Ogo4 zm88gO43n5V6!t(AwjOQlmi(Zw=V?M=YC~b4(9&hkY7`cE8Y){66$S1wQQ*}i3ViaO 
zfzK!oBa4Ky{4s00{BfnQhp7SX^!Pajdn|Jt#=)nY_NYH@R%mom{?n znuwbu4)5$W4)4Mdo9XYH+=lbpqi#of3=@SloQowyd~A@zrzSd_HQG(8Vyt=Y7ok5~ zdWd4q&u%?`jNJ~c^#UQ>Z?oAW4sz>x>rW2Li`=@j^?U$$uFYxKD2P+g04}{NPH~ut zg1aM~3glcHGQvGcx&jUAiS*(l-Q$c~rr&&R_8l{(Uvul-cVy415ZTzC3Ejq*n}Ln-Q0OP9KSTV~M9&R+HWD`Q{BYL>pL?)P0PZ0+3DTh3 zMBpFQ*2xgy>y0)a`arii!Bd*e3p~6F*xVTAAz>5bAm}zpcs8(!5`u1%WiWJ`Fx;SQ z((tc&+r;4?*tW^TZ*s5+G#t83q8uDfS717L!4w?vsM*e4EeS>)vm`DF&Iv5)W+uAX zIy?+#cQJ63Ian|hl+(lI<(`H=skN`+5Zp9~!Lkg)23`9n7aY_Y|Ku_b`_Z2~4#pPp z5=*s0aONuhli^VGNDS5xbRiWb9wk}9)bm}2D>U8T9e(A|P5v)pvbgJQMbr^++(BWE52fuac>oq+x;?gsbW<lWKYX-H-+;8o0-yVfa5<4I0wQ8-L>1Ld4IwJPDe<|^L~i==PS>{Hh-q#VyKh&$@@lEVx?qQ!LJrHK`;K#Q3kqUj&Ey7W;<(|@EE$QwtdCu^FgGN$`u z%$c7zXDqKmZ!IoD{nEo5aJNZ^o7CYmeI0?XTJmo*mnud(jT!)qb=J8(84 zKNP|_bOg>iOb>(5|2fPFriW{KEan8$UXB_DOmv0`ny$eoG;}x(!V0G2E{y5Zl|M}@ zn4;+uHO<>eR+x^{AN_C9^!4}vkLg(umUlZkjI<~Bu{2CY$as3=-o^sH#5!i0ON(>h z;(;#BapmU!Peg=#PYCl5()>J2F+6eU z-h<#q-4&XbJc$Mch59hL?wSF`bnsgp#Z3l!uI9s)fPZuQM6Iq80 zgHXDhZ`{>Mk1=$|51h*AWCyqEb^a0ehQfWTYGDj+420KFxsNRj28Z|y?hXC(i2C*9 zaJFd@Gc^}ee7>*lM)+et#;l9vmN$Epz7XAc=Dm*-7~WhtYWocLeT=|V3?lIa zoHU4c>Muxt-{ejXA7J6K>P@&ICEBAmee4#vSN%`LeIaWo`;HQzgrIiZ)*Bky>QFZ zGj3fYKydr9JFPJ#E}ex7IPJYrAhA)=Q(TT7(r%B~Zq!@*1!yqsGf@$*7JX4Z@g@u< zx3zCEWZcebF;pIvA$qT$t();KM1ot^J8&QlL&3zCHQl8Ax8FJY#i)Qg%nsOzneZo` zr#K!wcZ>tkaBc)!755y4t&4_iI1ncSxikET!x^!crtbmrhWHC07xt4VfS9ZJjM}eL z+zsR!SO=bXqv9&H=WF`lqlY+$g-UD#W+B6?a3D@u*)Tu!moDBP-(cA!;n`JqVH312 zxk7n$lBx#hvx!;yMhQrTy`@Yz8~eJH6ZpL82O8mTWO zwLM&S(^#aoGqqpt)#tBXJ08j3C7Jrq$0iIs0?GMXR{pXu^heNZ+Xgpo43QQp_^6{$ zlxqUvD3zoZ+znNTmX19sMy*Tq0c41antKwy*tJR2$dckf_$N^}|AKAw-tF}L@bTjU z;YX<^H}{x=`^u+9Z@KHVe)venlSKOR`W8G4q@EJJ{ha>T1wWHS?mg-MnHmUhW}<6k zZRtN3qBTDY_1>*NKQKHHzVCZd4_!XzX zzUvNe)PGvxbnd9{zV41h)Ne(_TbO zj1KS^OnvJc!6e?hQ?Gl)>D=J9qXWFZqyGH!xDn;e9TR6_2!^PB4u*g_cfgm>gKX7L z#}Euv{Rs>Kbw)1*9T=wigBXJ0s=tXLpw5kPIy#V}dKW|}O3nQ zi5`qlJsCY1srrrR0d?-KKVt|+slEdp7_E98Dxl8c8Ho-YtolH7V2tW3F$6e>I;Z>5 zgRvkuZYN;~4pIFW48b_nx1k5rxyug15R6wngdyP3m-gxC0CkS-Yv{mXs&7XJ4p;qr 
zMCb_Bdwb{s6CD4Q=)sYyzk?w-O7($=5OvOlx6p$Ls;|co9Ig6XtODwsBdO@XF{+Qo z5b*Al_8Lr5p7E)VLWHP$oIF>cg5!bIOVIDhc)=Bqzu>z!S3K_Qli>gUb51XU-glkoUf>HP_Qx1-hnWrO#F%mO_!6?{Q>XJQC3nns zKI5PdTjuPWP{HG3p_Cujx zw%(QVxrZLy^Ry#&KtIuUI$u=DR6PUwLmOR&gP~8sY-giSfd1wZr(Xg66buI&cN_G{ z7*^_Yp?|j2`7D5blM8#Gl>TU($AaEO;w~Mh81y%_rR$*=Anf$n2L1Ynoz9)30?MxtFC)!Zx$9N3%A2f>Np?}uwe2#!#i%$09#UqZePt^?d z(EDm{wn1OIz-9Ou`azfztl&53e@9eo()|NI)YB26AO7Wh4uD>-{)3>O(&+5tpns^H zoB+MT(+QG`#EqKa0_Y<#8CmF+(Eo|XQJ)F@bk#Y=E8yP?_P;})y2(}W59qf(;&hJp zHjP{%^w~OOQRw4zu6SGqgEfq=K>z4FSLjOU=QX&BS3&QKaPcs)7WzBdP_CL3U5at& zhd<^jZiGGv6);-7{-V9&VmpNv;lnSm2(oc4g*U@u)xw?RL5 zq4T*DdIg4nz0ZRjuN%}N=o@s>JqP_+EoasLEAG65tFE#*{@yQnG#57X9zsb7GGUPp z3q*=8DubXD3rbl)EL0JM6@K)-5+slS0qGWs46KVRB_dz~5(vst6eK~B;%*j15EwQ4 z`M!H1S!Z-+|Jh~caKHEdZaMed)86~t2jEkt2U*}LCiP=F|DqK;8B4}1+CpZ|7YXeA>?MZt$*|ZZSEQHO5c_@Wc2C%_D8^Y)r`EB-&BxF2OKUKv{ub z%fz0AR#&{>COrxsWnz>?3^CCi2e-2w$U27G0&-k+=evdTz`au)meqV>j_3w>pGo&N zxP{XpKTs59X~<#{oqYIXD2s4$aK7zW9`IAnSnawy^MW0X%xt*rzAAXWF?u)nejA$r zj$wZ3ip*~ln-p#Y{`gh%|8k`XmmJqJ#dz~{idw}uKP}t|Jc->4KMoGzCxrWej~Ua0 zz`M5v|%scpN3qliVKg`+djAh@m zSKPq^OmF7W-p>?vK6sxkmJ0sH`m6+>#)5n;wpN{tJNbi1Q##@0c9vVf0W%xLi$_fl zlm}Ec^V&ez&UW7r9Bqt>W-IF>ywj4M zJaDD8=Kqysw!>7!om}Y><65q?mpLfmG|pIkpgVD^_Qz&_=anb z2S03cDlxfg{y;7~O$@PqKt6nqIUHpyjo6tcl|R1>6T-@RPn+foU$zh|Ki|?6P52q} z|MK}oI6g^BK7VzJo2N1OJDWf3{~s}qn(`oIOehxAu#{Zc(Rz+U+AG{PY&P6%1(zY- zYXRZyn5eMg#a$>@0Ul&}qg35DCeDH*!1VD03M4(~qyfSkZ3hY}Tg-C_S2so#V1}EE zRe%|RPDn!fu(1iBp?MT+y4miPu9dgoq`))R64dX(WxvAzYd}#P$WSvO1);Gf3E?v4 z^@MY6X9`ZeK6GP+&vOI{D#`(imvwLVt~I2yV(|)L+rBKYk*d z|0@&Cvjtj$>obu$DJtgJE`=*wCZh=X`?YS;4&d2##)_7MU|M54fp?p8>k7^>AE3yY zha#&F*w6nf&PSqqng<)vOg-oW-etdN>vN+i6M9jL!D<7Q^}!citsh1zoF zRfX>|9TJYu&<3!-7pZKvKr#+|$|BpJz=^BfE`^udL<-Gsn^_7kA`g&wg%eRg;ql-# z_=5txfQ7~AG})^^+8EjjZi1)NV#*upm;&na#TNaqg*tmx4w%v@q1a_R-T|J&0vfy<{E(SwI{1k7 zc^}-({KQ`Hvn-}Lm2{l6XK(ZBqLW-6IH@JxG!mzpC|Vp7Zeyb_a; zO_UAZW+rzOeBSyfVaY=Q#k3NZuBNb`g41D0?Mhzqq_Fb;s!X$F@&b4v*^4>}x55qU 
z{PdM)am#9y;S9zs)h>M7a6|Ao!}_RI(M(R+PES)h;TCp$!hZ5aw^$;T4-tzNRu)v+ z{Jiii3&;Ad?mKp=KFW?3VWJYX@L21wOlhS#CE-1`u|UrkMHw`k+~g(zQWmuEKd{ z5sSg&Gu*l>bpFRJE7d2~1Uut*z>`c8yTINoH$eGaFJn}Af-#p4K4*?dU$NA;g_ZYR zz)NbAN1^A|-NDt6T-g6B2h1{$b`0Fm77#vRrlQ>NvN0!II@3)geBDg+6!@g|QO?-F z=2Xt8KXO36dyMuzC@iOS6|rHYFF%-8vfNusk|O*Gr#YbkL7mY<=tde zGrK%9WY2o4^TX8@C#UslGMehjaCKr%^Rau|QQe|yktT81pZ^WD55l#i=zTknx97cD zxOVUPw1%sweW6-tRdSUfRIY_9ya{aHo~yW1rsz>jpA~$w-7Z;`$bnf|?YR3I##2wE zfd3N4*=7C+-WSFbr#1lpT{z;oo4ndQABHpc0VVb>ZLbsl zxx%)0&crq=<6V3+<-)&WbNxea^XO_!4nHu`3!=dt-D|TSxsp8W5z%UF7c@S<9cob|BQCy(M zZCk^&U*cyxTDWA*@EdnOu;ZrNSu=L7#IrLyDM|TQZ}(!cc74ZKs~fK014u;At0I9K zlpm~c5}J6--2v$N5BGct5-7{_5v5$Ym@BRosCT8G-eznL*Rjd_nz~nGLwO+6xj)gp z5G>2>K{=J0QBM08ac4ji9$}z%+T;s2VGkJ54l`&kL)o1ps!I6^_pa2hC8E~6W6nB> zM04w+CiV1j0WVfQp=K-h`s+y+u0%chqMPXSO1HrFd)x#gZJ`BS0^yg4jaf(xU2oyW zO{A_DQ`6Y7${6Zm=o<5gfp<9?z4vK9(aqO&viXGm0ExazBPW`h_c#uJWCFc5d7T|- zU=^-s=^#e?Oj=yqtL6+Q)7c;#gOPwaLRPTl2p zdJ|Ww1#Xsf3ztSWG~iaWdp?h|4KIx6RQtC`PW9OOPQ**F3TgQmb|}V%qkNLw20Wws z=f=CE8Or>FWCoW|CJopT<1UnCaiL5Dld0>h;!!d&6vfWfuLz~i`U6B5ZaIlnkCqS(rA{mQ7n>(p)=fAJERx`;-N=h7IpW2>Sym z%WlPf<996`;fa#+9d&)r^RlBNyr_8ifmHuBN_vlRP~kpNjTj3Rv_)x+pU>mg^}VF_ zOkku=Nl?@%7=V)Y*y2ZFih7tnqt6(g6{4tI+NRQ0h%{H!ImV?jPPFw7B^8PAHY9`~ zKjghUH013Z8S=VLag;PFrA@5j6$k$(x9>jkW z8J@84pv~n%UZn>@UT0`##gI4kK}S~}3V9;+9sY=;HIyeI&2_VK$Sc<)_fi|5NGz#L54Hfp~VV%MLNIFrimNp2}p(I+VhqcoBnQActM=V_(6N-M|@bxX``C{L_iu5nKCFC*WB05?WTl=U!zrzatHB=pr?O;p%|oplwsh4 zfT!Ev3a&~gRVuwyMkp*&&a;nz+Pqf4TUI(4rW@qTBGs1{4W?A{;%c@%J0<9)ObdET z3(;8cOVCK@44-Gbn4tH3gTPOeLUqMSI1ii%&>;@Loo<-Yv=r!6>q08XK7_-XP#=-U3E_bv3dp+Rp4#E?WNU1RxLVp~dN-0G^{hFK}CW8!-J8yQ1S AO8@`>

n_x&%A$LV**KTbGm}%QjeHZ36||L=PVxBbzPDH?Pu`L?vYSGBKWD zEHW*k>g7X*L&;0$LheY#lijnmktlSO5fjZ=po;7s>ZwHKAh1^t6afl&w<%7;RLSx{ z*^%SS+KV7L86#UXd+fkp%pI?3hT>_&?J$RJOeF6~?TlF^%8Y_K_v^i7UFu{exK--p zr4{SSdZ8m%AqQUHmDpKvLmT3!Tpu6IKt7hY5~>;^R^}#jy3;76e7OZ(AyAtK7q55I zh}D+hFZMS@FtN*}-H?jlhxm|k6RG)-0St5MyM!!-(rGG?@9We%O#{MQD3CnUrT5Z4;A??wUq1LMVNZoCG2L3Ft*YUI+kUgUNUTgN2e3)WQS{ zG2w6k>llDI4-9kKyM*)vRPe3*u_;+^@vzw-)g%fi?c-S{8CZDN!8;cpQWRo~f$1=( zYjBXxm$)9vyasM?0Ok9SN5Y$zwrtla7Tsq&@+|&`>BHrJKK!BZ-u8 z1=#_!LeBiz6mtE)Xg9S5o!!jYzG$5m9jxb(gn$fkvLCfl5ws($w zW0&zrqr|2KYZ?90wRE}TQ4@{9$B=QfC3r^?Gp79F zaXJXe9U&ny0S7dnCgm`YTs&&J;(PIlusiZdsK@3ac~Z9?&Z0pEKqML) z4s0cB1aX33)4gE}so;7m88lKlFIhF=sc?Wo%Of#uRFt5snH+T>Az31HA(a~oYPHDy zF_e{T5>hrdEW5l*S@dG`&>AZQ5?RbBOt2ciR?0Bj1P19dL{e0!&sOINLN<%6-UBqE zlz^0~A#B!fR|w5OOf^U(&7g38Sa2hk%IA#TBrUko8GVAis^l)PW7;|oBtEjyP&UZqW|e4?A^Ht;taCw2HMs&Otgo(fyAAx zhz%>sXxRzDO+ep@btAZV?n}4Bw`^Cv_i0uleF0t8c0kHh)vLzkp?XEv4z2%k(<8nKcf15CeYa` ze36>z?SwgQk0G*Rd?sT9q*|}EHn$W7rgt%pNm3{E?vSI)`p5R}{&fpm_;obyYt)_J zY*EElp868SzG&n#GTmS9!(_Dsb!X?I;>Sa(-!Ijti?Li+Gy@7v_lne40VF%OoBr5- zIe*a!_^(^eyij`%Ul1;579|v?5>YOF{c$u_mK2Unqsc^3q_Jwb8fK=iNj5-`nd^W> z%0)CPca`@HC;_Y>cynG3f5a};@cVBGE49^Hp*{APY?OJ0J2PP z8zhG0D{Pd3E7RU-Ux+MMrV!K9%ll1;=}up_x-hfc-29Dz8CO8UH*agb%uJ172hrl_ zBC>~Z7y6(1gpK<~ZYEF4!9*-V+HSeygX!x4sb~K5a;bMn@tj+)H2&guSo(xnXLp_B4af_01y|l^D($c5cxBEzcE%j8B%0T} zV2<}Q*X`Kc+%eDlqvQVRGkakjiCM4TWQ#8MK9MG)S9ty8Iq(W^R=1b_Ja(0mDqBnCMsD)HSBKXXUWzLrd)H`-&YH__b~v$tFCw>hPV zP+FWBxmIeSExN`V-|dgY#;T`95S1*aOw`^)zUV`0>> zCa05_ch}zxr8A40@4QA9`}5`Y`GsB+t)73aH>%s}2;?i`fLUU@)0q=n`dd;fKe^b`A`6j^M`ulEL-xpK(| zkzOLqK=l{fL$BwS58jHvmG4U&&010dIOY>O?|S0g7u(g>dyV93J*&Z+O)$E1gV&T? 
z5btt5X+>Jw9rXwJmoDLs^?A)}ZXm;}TOYS~y~_>718?$5$U-~fCa+Ila>i=F_v!y1 zZ*Kx$Re3c2-!0p{SWW@PvX5$J;PbGR3ax6+01Wzm7`w0HF_A4Y-6`Yi-?4K;BBM<`#MLRa~oN2>eXLv ziw=^XU*8t3DY-Ljr_bc3iW{GgSS`{Tr|o5G@>(MNE?4KRbxU8r7OY&Nde#!?WVxzX z7af%Sb&jWrBPNa78p|(KbJh`0cd5E+U35sHj?#yCL425?Oi)6Il4t$7b%eV8of_B@ z9YhNF87A6$wx`W`j*Qxm%h(^3j{eA=BSf)BQ z0G!9vXB(nds|o9g;`^97dwujwYCXUH-_-h()Oy1IC$;Ua=mP&1HRf&tCvQ>TznfSw zP3oMxiOl@5n%PRHJ!(a3^r-#TWyM|fvp&!25T^l@kg(kpH6&4>P{#s4Zm4G%TU6G@ zXmx-!7`-t%iy#Sg8>2Hx+x*nV=yZ^$=pJGY>`-UlL(ePJMfZTRkE{FcVP;M0?R%mJ z`&X%fe*oz&Q*-_R_;;$NKM*wGG8NcF@OC$Lq8hb{hIgn7HxXTVg}P}In_#DUY7-&u zcc}L^MbG2qY4;MR;Zb$ty)4$F>dAYf^GcRxa6z6hEn3DHjRTt-2*z02HsikNd4a$y zyH)#U0x~T9QuS^o^1&rvs^RxX=LELCrmni5EODDu)&n5mCN<=NXk)>>QBQu0b5O1JNHal%pPu4lDXYmfX{p?6N+;59%hR0>&%qiU*_D z4cX!2h8i#R;Ur`%(Y7*4V!&ZtvT*1#N*eFgaa*F#X$t;Fmy*x?V{}WhAJ%_I36O4GcVWnVRxYv@vUK&>z%698}js(L<*}uSg;X z9tgpW2S|P_chGzgCubKK9mJ;w$<62NO#Tp$LTynB>HBp7<4aOtkR)qzR=zrCad}!D zH_eSV(_$FIB*d-!7u5d>DbU%%%ID{1h0^>%pZt&WH*2>ZNReeg{%rO;cf4@N3m3@W zE&eh&uSos@{6Y8&O9p9ZoQsK>0mXGz$ukgfR>+g^1Pwe3BBSs(4B)*noi;NTxmN?# zvp0ka@Il1a2;gzlFmxRB>FVfq5bS-mkVpE?JC{lpxz+6;^PTG5cJTH(RkKycAD_LI zDA>!?#ar1%E$Y#&(Wf(8bRfPi|21tDQoiRAU4GGn6~igGwXH(Rf6-Pk65>B}bH&p< zuG|(qE(bEH&pNC*X{tZo7Cpdy1@n+sYt;ddk=o!c^{dC&Fq_l^kCBD&GWFGC(P^b9 zwaLy$fZhzG)^rJ>E=MH?k4|;kpQ5KTh{ix=sp`4ej5G4D{{%@{rarvDl#z@MRu-y5 ze{GfqZZ*}{7n;vx-pcsXp|k&X{Ok1i>(+VYC7jhtzZ-JCLlP4WBuGI3tYIv1pHnq+{BP2|sjLJ^zzj z+77&1{rbL&!Ld8#PH%=&JwC7>q#Aw8Xnqlb1>%RuWDq^R_1S14O}%_y#n85oo{b*k z_us32^Fp*jz4Wv(T0Qwfbf6mXjFBI>f0ugl`RM*7J9lv>C^tdr0-{Kl2p7=mlAY1A zz?-|;#_xgm5mw-BN2l3jGYMcuKB4!5Z7yI5MK-rWT&Tc;wg z(C=Mp%qxueDs|2)2y9oWyI+a^O4!`1(HT5`{wjj-4z=!8qVZp*-g^~hzC%rXjkFCr z)bC!?-`ZZI=@sgQ*P?&%uT`fFsI2s_T~{}t(vRr))aytnt5ooB(UbQI8sw%v&;X&` ze?z)U!sWm>{!eguX&-R;tH9+F_1v2P{(beqo6(YtN5~s%EGtl54^$kxPx5?=OP+60 zp|_)l3bM*l#YDM;rbUO2ms5zyq_7Q;^~|>cc9Xj3?dT3*cU*ULcJ}KjSu|8@cl5~U z-z-R>7<$R$0zD+TM$SK?Az;^Lz9YFfm#Oc+OEb5spS=qx?^3tCizq)pJ@#&NNN$&i z`5=W3fV|I 
zD;htGKE`9rr{Kt5i+tb~`CuPK-gk?vx-MWmRWJoWS8w5q(mYF7>>8v>zX5{9A~*whX`Oagkvy&F>Com z21YuZ=;Pq5J{(o?JN03fnGJE@on`)ZzxUR{u#@ttG{=dH*j0%u<(#g?RpodcAW;Xj z`*f|^5H@#=S)CC`tMf}}ua1BlO;UPhq0 z0WYpqj)ZbUn=M|%8RC~!EF)%%3BM|?HFoI$C|Yc1%O$F6q21P@5Rp<)?ijaZaa%|J zcu7(j=qQh{I)^Z(D(-)R$l_41qBa|`)N)Ea12js)V7-!Ge*G$xDH z!i@paNiidBpa2MN09rk>%{ptAKa%S%W5PQ~EqTzihO)#cy0tS<%7GjZ(NF*ovLJZo z(D63ao@*ARw+B$`tWfnYn1v&NlrHtVI%j#EUg9!_kXS_B_jc)9VA`Q#dFDZZHFu~} z^2{^X@Q>t~v(vl&jGl)~NQOD8Trm?dFrdH3MkZ`W$M=Lm8h=!080Jjj9)@`%%8}Vo zb5f*H)HTTQK2A1GQS&_iA62$#9_1-C)#;`=Xx!Vm?&3BihCzBdP82WX2f}p$+0w0S z=}f2#y~q{@?rLi!Cm+u_XpHRhxaYFOJvvi1a0H(U4nz?reJba69WO|n(qP&;;Heu} zZ-me0c&@*5q$rkVYxs`Po8kcF)gH}<(Y5Z)1<`+yjKa98PMb*0`#NgJwPM{v3mH>g$Diy@cGFg&)OyjNW+P=;hc;eOdc)9A^dH9Cvqft=D|D8<% zNwMPADPF{EwTqSzs*`A2z(Z_7eg7~`tU{aB(WPb~d&&)-XYxw675*Un>s>!I*AWlf zy>@H%yYtH>I%%U@S3r@d8)p>+(fOARmC_zm}tN>u|rzGT^;L)Z3Pxz0PbTSXb8luR4+{vUmQQRd-YIy z_}B8hH0uM2IAOj1m`vCw@w{XN)Y)TTdyea=Ze*jf#3@21-#5lY8WY~i8oEQJgbec? zsh7O(LJ7Qf08v?P6Arg9|FCqcvFS(59A_*&Zra(sn_cVQtH<(yAYcK1j{#j%W zs>6QqUs#ttr;k%C?aJwIZ_DD%)xdC5#|r%y)@5(Y!prR7<{^u)NCmx9b=fi8!`8~bsVo`kh z7sb=c;(0hqBRQ4DjfA%}doG@%FR@W4xhu0+*_FxK*=6ylgKxRbAK7k2slw$(!-FnEd|+O*E3^alaXrr%-kwB6Z0a*&gjlncAyO zcn4)A5W!EBJ8w#E)nrX^ft9u{^1B8wnWLs-Df4_^+De8w5xuWl=Y*6>djD&u2#sfm zYkhc@*#6K2&x#I(^=PYTxo8doE2I@gE5Ie_y&7|Qv_PG6UP;C|u0h;(Lyd{T4~IR{ z{u&$jM>;Vch{n^BKCHy-b??my1wuZ=Di7L&+5elK%hl6m`8lIpjhFVxV8F?M;hqt{ zZ^&#jhV0joOCvB<@=r_ptbBD-la-$)*y3`fsZcivtR~>Xj^qkOUkGc>N7`NZ!?IG~ z?>f>XQL9}oEHn$|YHk3-p-JHerpFB$liYxXaJc~qyjWG3!CqT`Yc<;^;W*Q-@7I!c z!LOrt_WISkg=X1UVKL$+Am-5waggDh8hKD2Pl|G}@m%2j1B%CTbNu0<1M)mg!b3&}E5YOx!r>hXCcjWO`M_#S7 zzqZ$h>>O@Cs6#8wBCD}X>a>FphX}B9@b6bYtu$+X+E^BoHjzZSrKL%@;_t1+&*~x0 z`-@GU+xcCEkY%3Th46k2lN_fzzu&fCMEQN&f^l@JMQ2zADzAz&?gMHZFazUO$UZQ9>;j!mnXqjrdM=NRBIr%y-cxZfH@}c@xRo7f##*;u--V({G_T(K({R?J~~za`kVYekN+FN z$#wyPbD7#!ZPxh@Rfi8^{Y%y2LF6QVP_+&+58zk#Aag%{RSZTAvPB&^*sLztCihfw zoW{IbF5?Kh#4F&BYQBOKe-*tlku?nu*NK7LXjcnm`RspKt4&Ukx#**9|0-id|?IkMJD> 
zbD!ut#{F@u{&B4P<2e1}IQPd7^p79N56mW_yVSL3;&?vMH`9GxBL|;C$=Wi@ef*I3 z`G~IiE_I1rYAj=K%2_X`&^fp^KEZf2$P7x=tf6LQT|>UDk6@-FiSeq8&Y+%Ui>4?| zIkoYqcS^{yv79NzMH2wCehe{L9HX+gOcE;q>y0 zHpC0;6aJ(bpIw*Z^zb)L$?M(x>9(SOE-$FHb@%cMLKN5~-H-^r)9dy<>53+I6xcc1 zPO%WSz~43*(>KYJr0lIJ-!{uTF2!XZ5O`Cn_7Y1-Ds1@2L!)fNv zlyLqz&U#)nff*jV$FG3oL$bbQIEl*>!oS<=Y10-Z=JpYf&&tm2)M$ORB= zl3$H=G$}MaaI5@8U$I@DJ$(atlRs}w=~vvcx}}V7qVHZBcGe_X*d=e57cvEB`0j{I zVtb(wssBb{DcbqCR$%qOX7l*iMH!jItmkKM!nkG3*(0n zKS?GIz*~!G3dnWI?*Sh7IOLGPa(NG+wtL@K$uj|gz3=;uNHC%o_5cn)!T3u|M8 z=B&uHnwtWe%z8Ry-DpHVeVhjQG|qo(&j3#x+buzC}!v?y+tZM(me2# z#$w`X!~HWvZ^;Z02#C{TJVn5K2g^>4OB=- z3%x~JFURLq-sc*@zharv`hxjlqnAG$irEVyej-Pt)Mn74)uZ-KsSRP=OKt9Eb?EAv zu=eWFYQe;~URT{MRryOQ_ZG+5Jc9VLiP~j#>Pz}Yi!?*jU%ql@D&M%=omPok-D&7w z(1j!=%j!$w2TJoADneNrxFFnALfcL1hojBoXV#ZXKjkbZ-m?U-7PX+UUQS+mXNd6A zBt@DjLM8qUOSURmnQ|AVOuHOa0lhV=FGic?=?zjI)7Tiqnr1a+jCpvxu{=Ri1eK-0 zo(-$@W6bH<(x&cL8uO1e$5v7n<+CjWNAG4kyy378--U<0>8n2^(5Q|cYtAfgU{r>U zZ=T*GCACI7xKR^WrS2PRI#~_nat?vWTr4f~-v? z?o3+LrQ^(Lb%#!g4~dakl$B2an6c0_XrdR~Llb)DNQt6#N?ClO#(CDgBsPFektd4V z92B?&PSC?hqN$b2$1 zdO~D;+RureYFtv*%}D+3h`kNhEm#5$jPSivg48e5=}An-o@jiYZjZC0F1F*?TN0mi zh`7l{4tvgr7(C>NnH&^yB>QwLg07G?q*90~GP%jKzEtas!G|5CbQ9@oi&FL1|H}enZ;%7aA(+J;(bUMQK zzI5Gy+%7|^FRS%t*nnyvCmqcoo>P>%SbZ@b#@2y+A#*OGGx^Bk3 zOILOUhuJ(1o#;D~$4X2Bh!ZlXP7>}B(HZ(7>)0*Lv5s%blXdKpC+pZ#CZxmZq63kV zdZyEZAfizI-BO03^p?rOW;*-H%zG(tYk(mS<_GL4o?)yB*n^;&uL{-NTv%(oCVzCQ z*AKw1(3wJfAg~_s*#!k*QH4JJyJqrhjVHBcS98s71}o~1H{~glgidwI1bTEoLT;E0 z{czh3D1A~Xq>AC}h01}d#pft2 ze9uRUzq|9}8D+6B`&3iAH}lzZyvRUO&aexJ2WOYs=peLTSBYI;~jdV{3p%nL73$^TB*io|i~Za%64blHH>! 
zey}+#UA`FG)UgMflPfjXlbikMLO7_y0sEETc(8exsmpLBUzf?_2)<3FO)?KBE7bQV zVGR9JEt+JGDZf#SWkQ^Ey2D`t27)|Mhs@LJp-JYWcmgav#5~LYl6vf~yo5VL%4PDxVW`g9gYpJXbQsySRA zL?9w>YQXu*XW&o`!mZCvy`6}o7L`E^`&vU=O1@u#vk2<`nzKe z7}(3plribuUT?v8EKNq6ty<$tHR3R{?3i6y-k9JLtS`r+)8gdJWZj(>>zj?V(ntp& zb2{Xsk;EvZhlxHgN^twS!_1>S2-n1i6Zcssw8GD6#S4ieu^AWKtH0!d01o+@A~xY}#QCy7Of#hJ7&mMeHO zjQecU=*OWZ34ba?cWo@)F9fdd^Y1=+hmmY2o z_Lr!84>!m78`ax~b5!4l;^y@!=J0XcvK8SxMWH07w2o}&;q4sN)wbn^T~J00Tvh@Z_mxiE2N>%<*@rr;jj;>h`Mk2+A^X{@l0jDjOK-Mg<`|vTGCZ(~vB9(O+Y> zLR2Hg37s<~v~~->2ma_%XH7Bp3tXM4V*6V~K~VqOHC9Y~zhU}JqI6CUKv^?Uz~ zxo(|m77pz{w^ttIuuzhi8#~CGo1U6~n)#D`t>mTC%n_**LZ!X$6?|(Nba!8im-Zhm z9%SPii}y#x;w4iCQKH}Ct(|U;N$^0``0tQO7yX__{xwKL3v{l)5y z8CcRU&ww*zsL8XgK@)?37wkK1b9Be2rHi}yHGzChw{Hzf4}GK58$J-{ zz|sv_kp#o5g+Vd}Qu08+yaTo*;L&f%W5OxqkiaH+LX?_XQ1H5+ z{g`ezogpvn-0($VmutL$}uxNa=Bvc?>$#WFrfYG1pl5 z8Iy_+MVk~}lg53ehg8+E=D2EIyECPBl&C|A83c{bGojEyW&gyS91I$lt5@flXQ+zf z%tQOyzwJ2lj(zQfFjRf-%IIRPZvXEpqd#5_jf5a^2+#_5prWz*|DcS{iY5W!a{=Mg zfC%}F%QMu`Cz>a!xjzCDm#eiuGN<+dWXwtCT(#zSGu-dh8EW(iW~f--FA;{)2x>O6 zJ#-uj<2rTrkIlkwR;XfQ%s#;!jR>>k1oM=*oXWv11hDOJFAh|0w4ua;B~XqrF^YMr z^+e9CNlf6?3XBMwPcqLa;GjW5aDvmIk`m!SV%wCc(I=ZFCVfc!8i`J%<$zV0YR<{1 zjFatjoop`7d@)xaj55{MpQa9+Z}U|ARC8?BpQB!!8MXCHGrwPLQ0O#suK!B4{4^Z- z{-T~Z&8+AvF#2@!8hGX7rQ6s02L)crQeT~JmiJZT(;T(=46~GL zR?nY-)R!DD+vp6n;Y?Hw$yb$g%*7E8SB4rL{9J@&I}x= z)x^t;vJeQ+pccjP1&iV?Q@KvQ=M~`88cHXO`e@b-F%i=lP-E= zPGTKrnbmy&JLIS4uen|Ez)w?G!>%=NkCxgNbQjXXiOPA$XZ%^cT$@^G@_h5>t|cNX zp|<-Qf&xlD)&5hnsILo+I#(><-bQkncy*Tg_0IronQHo(IkDfDGt`A=o7JgP4?&{z z&OAX)J;xltv1I-^$X&_i*Z^k?Pect#wM%LHE|N#m_6FGiPWf(T$_}R}LtVbmv|}v2 z$MCf4XKD?AERgM3Xde9CijC3g!gh7WBJ)Rh3_rNYoK&(R6Y9$aPU* zP|vYKCuJqMqr%jPPZ=HpuRfS++2da zAuufHi-m>9NcMSc_RBJMsb|h(eRrw1&ok@s+B<78!tb`W=EdePKR&FkzM+$0Izdhe9DQl+PA;Ae`gyk_B`8*)z8HlpctTs}>c!dJ;jW49{HrA<^ z&o@uv>c9cNFy#ip>F(o_UzjEA%iDfoZk(X6F^UchhNowPQ<8Wiq$dI$BK_-YU@ccC zg$5GoUahY9l{r3ZsbqsiG%i&gzcL3Fy%m+8IwP-p(kA{^E}1u8k5rbaK^K^Y8uu%6 
z(0|zHYr2zUpIzVV^Xb$+e}dEbceoAZU$4Hp(CnDK9wmcZh7!~I4Z(olwX8RDy(t}6 z#B{L&p|!-Y5SYwa55E@=PC(E_syI>A2|6e+lyBi^%mm(EkWhv`-?w14AHr}lpfxi>lm zXI+>;BGQeoh~$>3J~}up7S?~t?l;usjplIyuEf06XpYY6xcwRlJ1tp25#tVYc(D@k(UzrK)&9Vf*Q$@MF~3NkMg$%uW2^& z)vP8nDwoSoZ!*uJz_U%}S3thwTJyZ1+)A5ro%w@wxmwPay6HOFs8%msXO_u0{&}5w zF#}j~J;QlJJ#;-P-m32rCJv+Jd{ zMZf3vL}2BOYS2yQJK;NbFE`x47wV?7N-Z_wW;1uccb;9!OVrTL)9dwL#3S6id6~T9 zTI+lF)XQJE0ky6Fo&3es)*X+ib8bdb{7$<*aHV_2v*^l{E1MtZa%V#CsCsqTt>(-^ zHmUes%H@stM4#>cDpnueYL?aE7A^Zh^LBAoHNMD{{XWV+T)W$b!;`p1{%+XrPkrPmcF9__rxWdGbtA}GnI#b!x&F&c$8XIpS4osdsL}adtENC?q!nld_5^ zr9$FJ=ea2`6VyOBGq=#NNU_`&;GHjHnmt65>*1+iI z1l_BhAo-W{no%2uhEbY-RlM@2u97tc=d zK_f1JzQj%O39=?z;k*RL1cItK!lPlJQ3XqFE?Z9`G|0Dp;0HlP_UA zPRQR5d)(t>EB>z_ufDh)Uper`L*9LYaM==qzGz>$eS+{iLon+E1Rvoq8e8In2mA7)7?^efcFk8wlHUTu*fBApNxY#r< z!QoYKmOoTat689i-(}|cFIETNg>&3y^~<}=k)_>H#B^L>P7=-F?3i|b=da_5Y14ie z%9T~>BZ|?jeK+?4b+?7;#JgeOiN4!rw3;D5KWA^$L#f+{(j!^>@r~x$M$^V7Lm)yh zpHQ9Cw6X1od(0cs{r^yJY&J)#w;$n5@~DbFYEI@@byfva|_nqzqWez!XDn1ZX-bB~&G`SjFmLLMBtO@};K zlU*qx52{8I>)@tsSVxwr2ez3{MwaD@yaG)jU_ka0%=3LU=?RAazFP1EXRoboH$Gt= z?+<)>h3b9MTqeYvX7|4pj=IbeC2I_Eib`ODRF z&r$m>mG-=z+o{j%Z}&W}=l0jXV)3u{1yJ1y)&KRIB!1t=7I^#&VYT!hp)1zw7(SYUc~|_@UbRqx`Yzv=_|?g%`ZY za$TfukO!ym7a7?~<^Ky7+TW=ue=%!DyNK>@jmmg^q;6X1@n-M(?1J;Ykwd zQ`9AYG6&`@H^rS=(^HOiX{!4#=5zi})PtR7b)flv^|wy*)op{MeeUxNBq@%oJYk{bBIAb>^c@212L2?WuY_@T8QZUBqqE51{?J@xnI#1E<5 zn)uNd{i3cr`OdA~mHN9g@k45NB!2XTzi4RLBY;Kf@7_dtsoazJ(HH%qMG6m2iJj4$ zTDvLnL(jTJhrh_)QXR;4ZwYs$tFKR~Tv&6O5GC9B)aU#Ix;(-cFQYTKfKhLH*Y$IX ziv8|JZ?dUaEB@@(G*weWQ=-vzLw2qn+M zcT=8__6B((%ptMpXNLqf>qxJ4wml9H3>tG&`))u!3W`Kyd_;HBMLnWrr&Q^r%Aci* zKcz}`q1-{BS7PYU>+yJndfhgA-NA-^{Uf zXyV*k-ui|{(7|v;WmI#3Bt)WC;1-eoo3Ge&AQHOQV zGmv@-c(*gIB)qrut5UouN7`g|T{62a>LOV(J9YOPn3~ZDvR;zJjEC(OOm@EZ2CkZ9 z_CXzMl4uW&7cvsV<&89*?0cisNLeTOE@u(xx+mFnUu_b+$PtT!h?MBXrufNrE#A+K z`Lb~&Nqnx?Xk`l?J6H{!Lyb3*W3b#YG*HB82qvaSeY{WHYT81mrz!O@-gqF0`{udF z4>Q>jM#7-eL=Ul)9^^B#Y?g1$$=dY5Sv4Tnu{7|EUAf6-TIkKEN83VM>z+`<-ZF;` 
zxH~g3^JubUt2Ek5p%*1S^bmDUH>-l_GG$eCJDVx8@j8m6_pJw40%8FKJ`N{Sa*$x9 zPgHww=LFo^rS*2I4o{tqz=abq2SK9eVB40byA0czs1qudSnvUn4rNugGh>i0SZh45 zK0mYanAu&rJoN;zg(y7xB{3PZ`o>l6y!kZNY3~LbNJrgyiK&-MXDKN2%9rZTe>ck_ z-7?f}hI+-*>O)?P>5&&byx7pmxxA>a`O;L(=#^5$wR5gost$UacIq{bV9vOl2spfm zHpmOoW;+kO%@L@;n9HHBvLMo^ae>#5b*c@N8$+IKmYx?6KS6)RefaM0t{&HZjr;85nsyor|UCY{2)kvnmHvkmeKep7NUQ zfXGVkqho1m!&#MO)4QYynL3@@1#dI@Adw@76gwh^9nsWqCXj<+ENq8TP`6uZcT@W> zH>gA2F$Y9?`%rpbj;=NN(0s{~O0KAx_6xeIU zBQi;?URu+iS_zpKqiTaR&_DworQG_{%KQ+N&VAuZ$fTqiZ9Er)pBMsS}haBzMWfCr<58y2{CGx5Bi2B{TW_9jk%&J(d z|EPS40k`>GY&>_Xm)yta@0y3HfBm>JTGuMoNGs($b{(@y*^S79iM8A;Wj0gh!#_i1 zQ`mSGE0ShsX~mUZ3Qp2#DT~oA70D>WV)O?YclDgA8RB&85S(gp9K*6GTM`{^mwMZ& z$C|zOo*Ap_kQW`ixJvHvrr>e5IHZN0L*=xZs4cDy4#}yUEqAeVQxQmvH+nU?q)1nC zsS+5>PJ@mK8N>S#a0`G%R>+=jRO>%5?US42ZIcKWGJ+J&%c^Jghi0=BZKmj+&sJ0abVsEr?*W5=L*16k0b{c%TsX)mO2_IkD2r4;94XXOr6-h<+I zv%Ele;j~M&6r>g{bh5KnDTPMMX%I4&B92O#n!s=H$rB4KXhTAK&}g?Gk5_(u{q?VB z=&KU?Rp>$%HsmC;+#}s^nF-Y)NG)1IL(<%Pea3-LCtpO4PA+)wRsC7G)a*or?UMkx zwVT`ma$%%Pn&6qlwQHn_z6ng0pdOBKJ`Huz&}YHK9)NdCy=2$d^{*$@bRD(iBeTHZ z$ap?5`0DGR#QP2S)$7@%R$cogENco@1^YlM)9cxDdVB8!Ep+O*5&#nQGb89_QTD`M z76lz?PhGH&nCMh%GAEH(WaJ`wd?n;4(zjh?JRwq$OCo7j)hGYZklev%DohD81r`9T=$Vlwg}4r%uQsp+7^ zSBEN;zi$T0=oR@>j3}%Z_mB?qH=|6ANEcc~uWNXAmkj3;wz#f`3z4oU5uX|Z2Lyj9 zBN|2BgM2F?{B}B7cKo(Gl}0M@PE6unlqgl1X%(Sc9G__&?0;9CmucNom4H=?jvs}x zr_YiZGQN(s9iBz3mB0r*>Yrhpvak3^jf_|i`=3xBM68=pyj+oOomBKBx>ap?mnSL$ ziSDuxdtoTY`Z^KMsD5OAK^mMIkMQOh1qEtCu9Z9RA&GCq-Tz+cT=)h%1*d|?BK|xH zyxS3+A5T-w9r-ot%3P~FCk#`C5&5z5#xvD}x!lCaRA1(TAK9ur&#Lg}s)>14`Os`J zC>YOxwWIt;#&cL$oKGcq6)LAR9VUo@)C8u*b5wnv6|V?;S}#jvfHXY=F@lJbhw-s` zIM13B&5`?lH20fCb83IX8t>0j3k<88ey=pFVg9gs$gpa7`L=vprz)dXO@+vP5y1KAg@nkr2Sa$>HNN$;kzRBH@MHapr} zLs7Y3z~=Nul_g?X7Ll)bkjqgXq;%Dm2ocg~%R6C17G9BUK%mj)pimGtaGJw$nl3Wr zo(Ijh29sc&&7?6piy?#otPj!$&z4{;1bj0zLUDvgEtTMl`kBlqg;{+K^28 znN9iC>hXLlFSD4BSQ`!XYQ9xmXKRs*mN)?hfk|ALmutahngjQUtfyDjj+582MzBLn z+G%1DO2ayhg8c!4;>h&1&DV92BE~nOdD;lyWUjjSrq+7!o9!)Ron9T`FtQ%Rb*%{; 
z$>D^@H2h#Rj)FoQo?I-&_y{1gY@KAmLyD6$^0>XRJ{7E75)rZ1u*hjzGs%vI)_Wt> z`Q@Cl0%v7VK7J*x-7Q;mlKkR!TGAv&}V0=kW_@?#p3 znWoxn6&{Ly}EPFp}*6b3!AsCIGX(HfW+%>_($=4{MY98{tZ8EwgMb_}Cq6|pPTw7@H z*wEM@AmXA8Wr&)_r3okh}L-2~8gm+T(e^EmxCg zZQ>@qH*5G;O(A#nO)oh%jT{-!V>?IhH;EuX>`1{sl*leUP`?V85p`Lym9u}gb|K(0 zITsp*e1r5|0?HPT5Q^yg58WA|4U>!TRIxQg97EEyD`v>paD9EDza!TB0=ulf|MDMP{+ zB4Y!$Fw$c}CqPe76UWS>#V#gDAX`xQOX^0%0!z3|;M>O)my;MYg~d=@xP%N_Sc=P( z;1rQPw_`*Y0bUNeZX+ZRm1~0N3nNKvrd#(_wbC+TQQ&GQZU>4eaX}oa zXv9gbz?&|IOd?KQ7iGJpY>yrW*)oz4|D><%HxNlO9%VX{43@tI!zQV->WRib7a#cc_Sv)dcHr+Ox1YK|)1kP-K(Ylal!&8XuZN zc_Jv>OFBE(%2(g^S%nqx?~kIE3zjHAz@P0Qoh*Uv&r4Qngzt%v@n`*Mr}v3_^r2X; zIuMd%q8$G9Ob z6gLV92+Gy#G;NY1Fb0IcxR%I}cCL^FxvLGKjv4BeDy#UEY|cMKGW}ZOgu^E@u{|=TZ(p>%O)@BxJf5{>l6Bq`1#Ow41ZYT|VwM~`LhDSnvKw!{KOP{+0TPf5< z_hqhRzcYc?ywGQjPF(c_eMKC~R;LcICe!hlVQB?wVOtG4ol(TjU^4_4$HLxb zkRE$Nu@XC5ZvikPSA9Les+h>V7b5uzIw!Fo{Wr35JR)0wWMHxd5Kcf4{5{ci(r`Ab z6Hz}LXjPV_7EYGZ=$EOU`DbQM5lb@`u{eU*Ip}}l)$8J67qz`v2iLLXK`BjTlP@qjAVlcMZ16^ZOct zQB~_u#S!)DU@IOIjZ#i5oUQqQx4GoBTfI(@FJkAZSdBF;&>2!ouCWTUvAHGiwnJT7 zV~s1(bQ@HIln3$AB;2>={O^U-OEuQih=h>v4hU-45bIiW8IKLIM%C#v4N@z2U|ofh z*4&8PD)JB{7yFZFEwyy94QA6P-)-TSXgxNIupqLC#S=lu6YX~N9ldD@dq96lq>~B( zr*0s1I2?OgkX?J$DF^ZhN$zGRE*sn3(}(1{$2g=;ktF*@MFRUYi4@0&F0C^qTD?BH zh=e2}jf@g8$Q@;!ADKqfaNc25^a~oNSiWA-ig4>VsTC1f1g7YnXOThDbwzzN#-0T5 z>qA=k(m=t4`UH_Dp+0ei7Oy9%;55dhq$jF0-~{ezMOM?crxj6e4zq>~7a_ynPbAdx zg)9)FF&`_C4qTN-i#l+)RZ)CvNThniV0zV&Q}XpHwyLv-TZKiNLUBV6-jHPuV{MeO zt?K&WR`riDJ)_Fdb6`PviZf01tL43xo|faa%GyxS^rZSh`I`y zR<(4L6<0UqTV?9uQP%g;U}|=j${1}`4A%Dqkh=~~>XLw&D4PUklO=z_H2%&~Ge%n% z`LopXqfvWve|0qa&M-l+teAhRI&_S+IwOp_fiskPS>A0?yXEn)svK*TCO;iI)*52r z$t^eS=yE*=;~WkJS?bqgt)nWo>f_6{gchMjTz%8+M6+*?rM*W~);Oy&+1|);)oHZ)>Sv0W*6dmF9jR06KmYCi{t_W3(%Mr&z5kar3>h^Ax(D%X=#=&sI}LwI&Z?a z#THL`@S`cs6-%1a*pJ$i9?0p$Z-UR-ksi7f>T{&0pKi5qy!E61&AdMvZ;hJ1*TIa` zPVY4OznHW7=>eAg)9*OjmU@h;BmdLcdI4ewPOwI@X^x!$CWqC}Cs?Oo!q~~nsxVjC zMcCkp7oJwH^Y3QV5a`B@CkZHJ6&mXh__%ZuNT_Vp)B~+qo<#ulcUY6Wu)6I)tBW1F 
zVxl#xE}`+M*K`e%mCEo|gSUOGWfi|f`h`$~T_C0G0%*O*@YaOl;+}$czR=W!e!WTj z!lXir5L9O%e?hN3ez!iPrXFM&b>a@B379rFhyo4W1I;lv{+2@<_S{W~9OM{*zw2HS z8XO^$S_>cq9H$v$=Qo|OwQ^G^IZ}Z(I=Je{EnZudBFVW!cIm!@yC-8t$<3|TEVc0< zt6&Dh@CMnZm)M>FT4E4;=pg$B>M6YB`1xg~^51Oa*SY6c89Z+x3`DIEGZU!Vp_@h4 zN{A$o6Xz|te6_#6v?t)~Pfe&?;i`iBlPh^oE20|^PKoymRR4Lo2I^OXCRu|qNY0vM zT?`$3X_7T0{V(7$aWFCuL1bInw%;LE(2pRX=0ww9LM+MU{l=#afO)D@0Z7HI8OcRALSyoM&% zr&zLeIgL$i!NaT{Vq*2u)1Gs}fHVhxaI(NycA!e=D4CaJmI@nhWt_4)R}R9N6MQ?0DRrI%A6nafDL z0WkdO0s{l9E?4=}I7zKhKbVGjZ;evZteNr(37{*GbanYrb#g z`ggSDeIFw*R}-hruuh3;r4?!uK^{6cu2O9?tPzr}Og1{|-h+8yo|*s{CeNpPVFHq^ zEbU0^vp{d}H8Zhv;;=qprga1N174hI*~V+HYv22=Jje|e=}_sjSW9jIZpx22_(_E@ zUhwe6Vzibq(MJw43FSA9UjrukPIG@9&#!?KeGA=RXQ8qhF3bnnWA!X+YA|jWt5;`P z<@oM@!DD3HPLILZDG}jJC~?%u=uIpT7<2)c z)Q@Ldix6KEiBPiD3$v~9CHFlDFVex_+0C8vTomb=7aOJq9&Jr79j24uh$D{R8W9rs zce4z%L|u5a^}|pJ2PL)fr1E=(c%ry0V3E)&$z;#gXaCk+VoCv7pu&KA!+6Ou8#y)NC{% zgDN=;T2Ssj1|ngiFooMEBRQ0amL2|?jNDKnhUY;j|2Z6ERF~lMo;kTykI#5Ou5Ck? z(wJh-vDl(7Qp1k5hK1t-Ugp8;(~d=QyGYd?i*dL_-FK`tHjLX)Er*>5$|w8A;zE^q zoR!B*SEUjkreeog11B3=!W39Az|`nSY0210OpebkoUzAY%{U)_eOS)oTBAYz?l|j! 
z(Au2%5Y>L1RZyF3Up$Y#{@S-FK2S@s@ran<(w(aFW21SLpC22}1ILc~&?2Yu{MZ<5 zCR#R+#A@ixH73O+h#Dk{69GG`MHoQ~LRv8tA~r%D`U9(wUGl>pSW~J>k|n$Sl2QmC z9w3}3J5oLP1FMS20*QBj6u&upgnIV})^SjFmsA6q#Ltl`JjexYLH z>#%D7u{AU&O8(V;ZI&qG1S_(i+yrw+Dx*3tf=Uw*FV-f?2#0gYibCc>9|&vcLs5hP zbw{0G#fC)jRfwTK&~GKI9|o(8Dco|3pvnZA+=q*Z!exlMmf{!|#9345h^En6+GRb3 zvc%Jki*w00qPc@H*CfhQidauafvom-0GV7J%Gze3)&e~A8oVpm2r|4R;ONR zrKw9#w1!OajO&1k*`eF;K|hFAT8q|K`jV;X_Kj7!p7)!lG1qJPji&Rz4}6@;#ZKkV z9p5x=6$}yoCI5VF((GH7MLGFN#txG*F7?g_duep?^;V|3_$13Jzf8}~%S!gGb30UL zapU$#nnU1pAxp?nfmi@2iJkT&I zp0`fH7LlnkPersh)X-C{0VRASFeGImNck)apo4IJQNL2nJ=K~Jyne|_bYTSZUh=agm=A zfx9=TTTZto1ll&JS5CLeizJgbU8msw5jmDUqRse;Rpk$S@s=8MhV{#=mHl^;M*64E zu(J02Aj8emc1kX;aPj~}idD{;mR(g$^y}cio9$_5T8A^OQ-5I;sV!$(6ZdRh_ggjx zdK*zg=2(OLQ8jyxRXkA03uk?gysW~Sp()i~;MqLKnjk7|*?BqY^wm~DC6vsLLQ26B z04W&l{^e3sWzDt5W&z7pB0h)PX3e!``2)*))Q$6i;w?R@bDnjL%L+>4U=c3hpovRX zG`*B{*v7)kv5*@PL`f)CH~h%TP*0p??Z;%lKFjK0Ivqcyxxc-w!nM|ffsOrAo#|z7 z1h-`Q8}iW*v1EptTWbxjghzPv89w1MjfqxA&9l|zAW zY?s!%in6`YdM+hsTK|bR2d*%<`Uj!E;4CLDap~gEZjaETDli%~Fh%}gIT%{Z8vf>f7Yazrr zq9Tjns4+&kT8&v`4fhwRQx;k0WVrHYk=n`U{CvBTb{L9rwXVOJDX6S-E!3pOHOXy@ zLKbMGYO7LBJkQ$S%9c%-olro#dqL1KEJlZvs?&AVFb1y0!D_V|Ap)T9xf@AMx2iIMLttd85x~>WA@W-pg z62YHDa4F*FVpQR(=Jr+{@f}*W{;K;KD1qL&l!lj;I}m?d@N-O>c{7^YjnGKRO=BFHci?^ zK&p29cJ(}`>b%9=3W=AWO=!MygkrmTQOt(!2~|mFr>5K0(*auAVf9NJn%bL}EFBSO|g2&tIUKjRY}E{D)aN=NDFG-F3Ml zwmAVONO(Q(N61O*`;qQs^!@NT#E4B4^Y^cl-uDFov+svLwN9FoQq1QN4K3MRuz#hL zAp}w@p+iZnlhPbWeQjlwe?{7b=hw){ifEx{{+^F$o6gh_v>ZPw))m6 z*_1ZAJDGS_h*yejIbRF8;~7|NSI^*C_qA_^+{j_yihHEyvQQ*<_uAUWee9 zYMC@ui_3X#vYLo4ggVARs5uBZpM)>USaSP@5)bRnZCr5}2Y`sEyx&+Qb=fq>{Ai3+ zM+pv;sEr%a{J5YkbohPmIYO%wEPK^xYedc7;m}5@3kbwu)ZF*S0}= z6DdwV#Vh5vR_y4+%;5;QJ?Pfm4)L|-S3F%WWf5OdqzX>xS+adh_q$jmfVi+&SPJW@ z?)t4&7AbOwaO7NEHpP^hQI zc1dD_Fwczndd-bBd*EQ|vT9LB;~X~kekdXs-6Wzey12LMcmgxyhFmSqF;xUvt@k16 zT#clu(rS^8Ih=xm}qlD zGvZ@$Sc(mxaY+{`w5Evv3tUTQ=`(1xOw(iFHST9| zp?9Io>sWz$xXvm}%eFIOVfAvIRT?a`!|Gpk*1$P(a6|(vt0H0)MtAvh6qk_Zuvt6} 
z7kgX)lMO&_%!QbZnu5THv@4 zi9i0JpcqCwd)#-;16-Wx|{;#DNq+WWJ6sOK|L|A$8jlE9x&)ZA+{%6M$ypTn$Zq zl;Xr98ZLq6?=>`K{^6KpzS{$A_EL`QgVn61R?+E$=QHFi&`@B3bD29Ka5#q#-dL+B zz?_&7BGOnPx87&)M?OhCnN8mZ7aqEv`qNTtVpV=0oU80gNHo(GAXhOsfUK&PSz~9( zKq4X&csSr{AM{QUB9_?1B!`>>l=FdHu3&YQ>juj5=_q#|c2ud~FGE=tRu3<;3P%gK z&9L*_Gi{58Q*zs*bJwR>!6P=Q{;1NHqi)UCsM{n!*4tP?AWg{34(o?y1;u!ry7bW(7!2Q7_jXUE5~gQv$2WtZnq z9ei#KWEVqO+>z%d&Y`uj!HlIyx{Hmn$4YNu_0)3f;JQjJcU0Sz@pRcFwmm@~0313N znwgOl8F-@w2id|C>~vT#n>|Bv2l3>5QW-0S^@IF!WIWiOB*zQh*@G#W4FmX0hG?0FAdEXG7O4{C?LE$$#H(%)3Ls0#c$FPqi0G4tpyVBuvX2t%?K_=m z;_te3x+hG|js={=VuK`KqMRfP)F~^hA;kr5%GIH^*6YFm<;;XnlKTA$xY_{pb{DLJuRg! zxm`5KlP>D#F+^0KVx&cbCJ)oN1&~AZ*li4QGD-*ah73aHlB?$;^8zLU3xwkNs0EsJ z7rfP=@V*QZ&Ob;{PP_rxwgq%5@*g`vz$h#y$73m-bD>9&Gx%7xOL?n!kuA77)&@c5 z$BWa&jXxnO4nVdPQGwRg6)Qy&74ch~FmiA|&x#GePmI1GAOd@l8NKoL^#{t>18Qmf z@EK)b5JiPQ3&H7YDdGwmT+G8PmkZ8u4(0z;5EvjtLq{?(UuHo?AwfC9&0NzW%I7I$ zKK00HL`}QcnhFR7`=Ax6By+n!Jv-ItxR(HTa`hGbvGiv$s;a2awOZ#2Pjj_Uw+FWy(O#pmo`s!s4B7nTAO0Anjw9SR?YXX`o0(tv6n_@~;v{ zL$)O&RF$i&ntUxb_8&!9ov_L}@U&ZVWB9rzww81T2tb^4%@xppu~O_T9L|BiMiigi zQX6 zS;~HJDOZqh&P}5kifr+UZ1#$@x_ehUSa|LudhaGsR&-AUJJ8sQG^Y8cbD>_e47*Y< znw(@hpdik6AsC3C8awk+u~Yr-GHU>ctBIRNt<4^Bkq4`fzFLR|A}Fj`wsyJ@ge3D8 zKzRZQ=SGAvVdRj1JxY-oYhzXJ-}+SJipN@9hULa=Io~X{(=*N`4RW;_ak;fW(bvzo z+&cP17$xY(w7?3{3tXqwr%4CE?S3vFIL7?_MSN~%WAKx|S#GwZR+%EKXkB*~eA_hE zLM6oP%GlSbyhf|QY+(nu?gTU?EGweMHzE_QRX=RB##eL$SrQS50pG1@+zy>f)hJhH zh6lENs}D+L)#tZMo;-=1D6y>fbI0VMi_(2^dpPu2%r(~mY?>6EOz6qAjtN)Ik6 z>-DNE4-%gsYma5Q;A)m7EY5P_1i1`AI`yeR7~gkZmb7au%lazIGl2D9{i=L$n4vJF zV&iiz&CB6tNZTpb;Jp`k_e%8}h4J2VHOgtSa_fkrrsG14hG-+I?+1Hz&COHv2|Szv zUu;eK>OKtHoxjAGfiY|lx$9i$d{~53gqb9e@N+d>M;1b9WasD}2%sTDl(2KFgSNV^ z$tojCO8Z$>)d5-+Lj&!aFLOEMdGKX~uZKjoK3BoeBBybevx_gCKAdN(>}#!fmXQ0o zoXoP-0oPiSgV{NLHB(u!(!O$7E9W3rVcbVLb6(n^?!DGJIPlpH_3^b<&D5~?SA-o~ zoSZmP$mz&)vFO5YkBLdr@=Rm2(7CXHYS%nD9@*}X&V+BD`pI?HDYCxUED#+jt4(EL z?Um7=N{!d94N`>GIr1_xF>F1V9(PkB2{e830khkl_T;c8Zppn`sutimROdMg;Ke1U 
zE|CrJHceoIyUxR>@)a|vEje3AESTorVTrH@Fu&O~pus@xp~Y$=g#xdzjwK{X!oavm z^W&LLWF}f47~EoKNyVZv)+XKgA0Hpo$JblK5BZ00es0ho@FNwZI|C3Y(LD&(qq^*a zip)zk)HnR0G$9UaB<`D?>jWZKs#45`>X9{8tR(6TMo2}Zt#u`2#V$@^x z(^LR(LL?#@MKL_Px%|G&xbx8*vBBR>N@`&$c9>uC^0UmXC-(-y%b+=y0`iyu06!@;yt8ccd>pTvxqvweS zL0m3o!#6ySNa?vz>mjr>&Xja!YtX0BZwEC!$w}mj>rHnZr19tSJZ6OMIz0pggabf& z+?z#=ki)m;O~JtQ$@WMwh3AQC!s`NWuMGUXKF#rxO$&we=0w&+0ZCwQAX)MP?xy2z zu}(aqgu9vn&xf{*zpBoM&a`^>5jrzOtKhY#vqkb@Z|!L(%eQZ_%Id_82xg{*eu#@4 zm&+Kx%xO4hRShp~-UfJFmlN%;PB0yp;4R`gB=mv!IZjY`VZzx|PIx-_4jv-$BGD(Z zg~f?4ZfMU|-3O9w=IJ7=a(?t2(7ITTssyauIENBKwXL=Nb;2UXr5Y8vxFaqgibll-Tw~Q1 zl`0Bas#epg6%`dNDpjig@44^I%SkvDnouIHY6_IvI!(10UGg(h!F$8A%! zgKd`=4?&ggy)`y$xL1E;?)p2Z-czcJ2rq);u+U3|!bCtP@J72VLy1AB#GbFqpO(gs zHd|qbYqF@QJ8LpyS*&G`!LywRihq|o` zxRF&zzo6a7y7-8UJ-((}6I*dxtnvtA!u|845#g47k+|zf5JdW{_(+qp5ifwqn_V34 zVKQ6^lo3TvM3=O{V@Nroqo;)Vt?iNw{cd@z`0#evgU=)%5h=ri{z7wBAQ&9UCN{<%7J8hkj6#rO|1J<=o}PH_<@&O{04ML2f3`rVedQFn zY){7D*XEY`;F^&g0yjInwN}4yd#oCIbqjlcxFs!gW8VAS5vv(Kzj!y8%awP;dQHm4 zJ~!pUOakW8)-A*h;*TcQ(9lg&As7X^%_s2BE5>f(=?GSWX0m-%c-4(G|rGL@3 z#COY*EnB54?u_l#Rb=2U3Ga*{glpua#Be?L&e-t>K+QN}7{#W^^YJK*j^jUTqKU~u z<<>#A#kQIM#z-6Q>?~3g+t+oB`Z_G~q?cR~aOe;>v`K+;_t@5*e zob{1BWiyTqeJDS1i1Bla{G3cghCB%ay^BEF620uM*eR)D9>$q0XcC|mfz3vYWrO2g z6K3MONtdW{iAttKAJhfOnHJ-zcsg+`?>u`8ix$0sEsz)QEpmVn0SbH(K=SFqc(5Hx zmUj8oGadmN^rf#!61pZ#XI&xvpwY?P&E%yr)9v5E*(}Iog)?9z;32COgBw-9Udk=XEXE7342>~nBdK685G$QuwAQ{`r|ypRsV-m`B5QM)UBw-9 zMxI;%I`P+7&z{9liX8Hr#fcUpytTa<%MTM~X<*g+o>;-b;wwNT!UJr`g&DNX4eqwl zdXlB`zBEV!xLduRZq5vl^WVhPu9B(7xp7;bM7K5zELuw?RUCE!U{cFrKuIMCa5ol9 zp7sPX$O13HL4=_t1R@MXPTa`!B}hA_UuiMZH@CENZ$y^3$vGhJK+kvYjrBNOB5I}2 zCUbhdkqDPiJc+;}=J$2qUtMFlB>pp=kB*N%>WToWSy8^ z`9~yMfzkFN}C4@x?fjtQqnBfYBld3=fZQN2ZXVYv)@pE?Y02d+SBIy5qgQM>K>v zE4K{RA=b#7<;gx+%+qaVoU->8lQ{D0-Fu~a#Hv_+iDz ze5^;A-<1h@vGt8>Vn5VZt&ffFAFmfZu-=&a3`Y<+?-q^JGL{sEtD1N(k(2SClJw9r z3T!=W1#=Nh)gZgD8`NE&StV+CZy;(624YC?d1R8YtRYIzgXT*i_wVfMVQ~36P78}O 
zz|WqumS7HVPZB)PeV#Ja*dYaq#Jraq4!ADp^~I3;LPoDizsn8f$>i>n0V}DWeIQmn z5E4V;zH-0KSAuEm=`1$NENsbjm5P_Lo!z!zKA(g*I#%lU_y*iz_h)fNnxHd@wsMY2 zW3~|1FAFpa z=qr#hs$|rI!t*M`K1HIZibSD4=HXaf?+k!3MI*d)SBQb5zVvWx%#Q=n%{DS3Vycna zAgZ1kC*m4{E>0Vv5bSNZyp1C5sdB3-lGUa_Q^gGXHKBiI-|p_9rc{fTAiH2#=>7_G z($mc!8(M=bE`+vK;)JsW4%>*Jyl!QL$f}~}*!Sp))>yCD?u_D?zs33w;f&R_72%ZF z%2A~euIuB4=nDl1+96OWK9K%61XU1s^rNk@#O`(!*M~nEtB`!LLbEr>Bpsxx3jD&6 zv*~>K3(H?PHy)|;K)((MYm?y6l9eJaRNO)CI8^>XqLyV7Q7$c81ceGZ7LrZj3m}Ee zY+6A{!dp0B62#C9ECtjgzK}D5!>zUoe@A35Gw|Lm+%sEbUy$35*FdHPw_mA{_!wl> z3c@&8H zgKG#@jZd&1-y?!`upwC9#%?tP%l8s{5q>5Zz*pV+2?Cx44vJsT>^*y#u6~^O%UXTt z}y`lONObK2)ao;07VtF=2AJdpF{k8|M|T&hodA{Hy@4&h;U#r66pVk5g>>-0az#;Q)@UrC}~-~H#9 z(+6EkZpFcVWni^C(nCC2DY9g}9{-ov;L(}p6L&4DuZ7k+LUfsQKf5b2#q~#jiB*+) z^3eu6ndi$#`hgc>cDDe7G8!);D~6}ch(xudmreMJ;eyECQ7^P4ZR`3>pYkLptZViB zC-)F*dg$_}V&zlPe(Nok$Dow4K8Ut2i4RC7C`T$WXr~7t-cLDiD!Z)^a{|&+GJ^B( z^ry_QE_{lp|8ApG-qX8-P79un?faD=jUKx;Hh5GPwrsBKge|*Gq2hY;+Srh8LB(JN zm&RpaMbs7v+tve~i5*j_IbLgs`j1FEF!!0*(4pSZ%qEqzW%J1SOA$cL;Y+Mx=7hQg zmeL5@?d4};BZt|dFOcTq2Vx#0hyM@ouM(%xL~hhLuFs3*iH6iD&l-Kev#~J;2wLnV zXd#(u1vL->`OiLFGUf{5m1si$9H9u~aa%->LPmYtv$4MW-Dgx)qL~ZiNXM|y9X}A5 zQVl;8>3tf^Squ^UykU_IhV!=TVuKT2#Z3ww*Apmqhc?uCQr-3IV&l5C7KY$8J&(S9 z&~vd~Bll`I5wR1*?+)GatjfDdx7s^+{kd4bY&=|B<*9mxY6Z6I-B7L4opcWmrB=_1 zd>;?xkW%AP)@RbvL8Y1uRT2C@`n8wH_t0(LcH+yiSa^Q1wqEOg=gDzqa-Pp% za(uXvj6BH>O3eMB-mwN8hkyWHZN&pYj|?vZUWpCdyWPxY5^RYojTaygFbHS$#Z^F! 
zbN;ul#CjC?jy`?OE4wk_aS~BAEVM+@!NiBh`-xXGZ>6%s;x{ zWB+>A*Y^sVUX~F9>zR7Qgr(pAN36cP%hp%_^wn5lWOtFqR4>SL%k4ide69O4G8-XZ zH@p_JdwTk-aKr_25-zQ`Mm@bXroa1Ath~FkgFdyz>IzU)o5=yBr?~-rMXa%m=W%(_LpT1D zQC?!qIvZjpRqAf=Kht5|zhXa47TZ@B#gUN)M>fVJCc!k;ZKbP+V}fyaM-YkYQ(wo# zS=#P$!-kH9t$=&G#N1vGuTMzMFVQA_oxOqM4-%r{yV7i=?u-~5HWuH-&#(-^oD`B` zVfVyEq_cnm5HOFDImxp+zLf!ZMGc1D2%71GU}elnn(Hil z8##+SlUqJKdnOwjuM$yc#;-I`Y(U~yDv9?NROs!M+D%KknQx!BV#VE`s2Jf(aP=FX zv*fk9;;q=k`eFhEyAUT>PDLV%Kv`ykx)I+L8`IC+jmJ?VOX!DZi;$!J0Ztc!(E7KV zV#9uVmpO&d%*_iDZY3y5vc>B;;vj~XEM#6q)ig4T zxhjp!t4fmfc@y*pZ^sf{n3zOj)$5ooVkVI{IAX%%xRzIP z8b5zZxGxV{2Tp2hF#dcw4!H&|W|_N+e~C=w zdR8?+-|!xkY>Ron(X6oeOv{YFvrK%*{tc&P@iGE5Ja1?jiQFPhyrEnZuCIKO_{m>9 zo8=u|I4-w8SAMbdW$v=F88-J(@d;&>w3e&LG?^efp}R`lU}PyjV}i8aRFSvp24^- ze^p=leysXH8Pf}%w2c;=F*JWzD_$j=u$4}cObK>*88RBrCjHO%N#J|0F8P2o$oK21 zAH?DS{ih#b6KvG>=dtQo18FqVFmE%kZP3?#6dR~tyR@?SSOdQ`OpU%+5u)*BnoQ8` zWm47WgZ~EFX?RGgdNVoJ(tunBnE{VQ`tHwT@fl44FbE1T<#Tp&?SrJ#?j@P7n9Q0| zq)DFZB!_eujG}3<0rn(#V3O1uWDqov7c#K6QvKYP*bkWcm=Dd=Pxvr4nZIuMkfd^r z`cEIm#`nCpNHTy%XZY4qJzy&dy|2_S zY>f>LH?3ayF}GX1tjB#E`&Jo$*#M3({wjNpzTxB8Ar#PWd~6EpicezWID>ZNC$YG^ z{^=*NvuO37{-2RgY3CMw@TcC_Q$LN3lTz1xN)pJOLwc9^Iq?4ne$Kg=Ol$-94CvtJ zzWT`j#`?(2&-kwgHvQk&xW2a#)69jodC7b>6|$r6dSF`-rz5JO`GtyX=CrK)3T z_9f?*efFhq_+M<~*TA!i4bPIBKn%}H=G*rb3z1H9EnW-9>Ps;vp31jZaT{sezd}CW z{;e=Ik8;DwYBD()Xw!j{nM+1Gax$4>nv+o_gOhQ*%5XBCX-=k}`6AZihr-7GSZsVQ zSY$hob?JExHUkba{fiJFb<%eiaARG12HuCC1#M+AylG#?`pHf@>&w`_`?0=_#ff~0 z>*zYefC|>>XX;e-mA0y~=3d!XjdT`?Odu2G=`N0VSaju2)MOuVuDnkTweG+2UuvI$ z;6I__ybOEsI}j$i_AfF71?T^fbj% zoGB5lh{eyM7hmmq=aPIE&Sou3^-EDzbyOzVki=^p*ZpQG+cQlzBzr^lx>F}}ea9B4 z0bIOwT7f!jzw=4iFgVg&KtM8!Qy@TD18B-^`QWbO1xyxAk3y{lNheT!Wcx4Imldi55NDn)RL67fJyE2_cHH2Ztl_+(uVRfC0}@eZ zgy;&rbZtZPp;D+j1w#0VFm&9N5o`)sBrCU#l7rZ>I`kKBTFxJ(#n#l!&MSyYJ3xp_=waAlqH)+ zGqGbri5gKPJOd%AEUZVq7gPD7{q00hYAsQFPsu4L4HHOp zszGOTQ8#+&DK1SJ)-EO)g%90GeCBE0NW$Zdc4XY@nc5MvGTPB=Z`yVGjZ*dPv=Uxw z6gN*3T%}JfQ#Gmf*(JH?#;hIB0O34YWDS0M59uO-EFzFk3jwZIY$J##*VL!Ql>qOA 
z)cZ=d4>iY^OH#ylaXo;cl}0uJ3n(mFwv&cj8Bq~tqwWlN$xO^kBi@bML5Sis5*5>M zFcV)~`QfZGUJIgR&1Xf2ipVqu`_c?2qs}VnOeVKl>IB$F38T%jMbU2c%jFP}WxB9J zjklKS!zxriYpwoih1$=Wua{J)$s*OfQ=vwcU$`Buqn0Kcie}jF>tQw}Z_&rv>R>q0 zWwzQUHM#FVi{YHgHSJlCWc*Sj!QtlmW!2Jqu1z088` zdJN|h5HCiDiE2SaIyl@W4S=ef`xV0dwZ^|*u zCd5-D@ph-9h~;vVL%Dqm2L+514y%NjgIFavAY`wg=5XuUV)9jpUT7{s<+24~R{Zy+P~R6&%V;Y<7QnMQ?Y{<7ju3I{$Zv#NP745re*^c zca_&a%TeJz!67M_BEBNqQ&1n&r_n^|Dw;5ZaNBOt4Kd}6Zw+eMoohXgA=(Vz-0G$b zFj~3elR%UTqL=M2%k_pBi2VVouo{|LS4`#zS(P>#6(rkeh6;G?g>dWP)+ScLv}YE9 zR=^e0zo}w#mmEMC9NAJVXBQc$GhG(k-R75(hUSRLk}})UWiUvG2;bsZjQ+z1_!xJ> z2D)N08}k3dB=NvQ*nx($vjrpHcHd++GKFDW_Bi<^a*%*|e2iBTMk8=$+g>bxcigsf z%(iWmB2C!EWz`x=(%W`HN#;hB&fH}b;@_w->7_*DHai50vq+j}Wfn)tP9#N%xV^)} zc;+Vtd+q}6L}SV&JZ~2}3(LRRIGHdyW6B2+RC}zx94bZo6HsO0Y(<7mnHxWuxPKH90XuqX(j>i2G$^f|Lp`|Ob-?R z$(Q9B5l_$_p|J*C8cpjEM^>Nh4Hz%ZYElYGg8KzxVO9j2@gz3QSdsl>fFNe+4Eth3 zov+ocJyiAK%cQiBQ8}<9zeQ-^X%5L59#0J%#B{f^Q+K4#qa7gy?Bl$Mtr!{Y}sFw z=>z(xi9K#7Ag zvxal^EpcSBdHPNNV_8BSXU*2%OCYe#)0ZZ^DlG{$&6=b0lWJhbfaMDDV89cTY6h*Y zNUBjx^z|gFB?p|2WU&`_|HNa?xy|N5#2f!}s7|oX)4c|%smPT-8HCwsE!VND zB*JDhX$86D`7eXi;L59u0gPM#OA1bsCILEYusX`RQ%@hv#8&F-2dl%aOZ29}>YFq$ zeh8~HU!O1p1$eD)9HI`P$l4)j*W_jzqP{`NDSL6kdA`12FV)ahT_ZIFp}i}6 zCHK-|#xmU6pikLb^(b6NO?b=(-LSW+8Nx-dW`drZn*-Y3M4u~(hfIKq06|u$&C-wV zt?I&eEYPp+t@?*weqHAeS7)c*K9756=Q<@-Au0%2AMF0^>e<<^4s`$gw+8d7&M#%m zZu8$m?-}hrvzZG|n=)D$=-%BlC%c8A?gjI`elE4Fa0(2G&rIg`3&r)e;p(SE`~7f) zdcOMeGJyqVU9--qvD`#EtR=*@iH@IqC!&0#R}X|7yu9 zcM}h}+4XMy+&*f$^_WiXi@E4AJ#AmM`Xc>%`RiVNoBZ_;{nEZ{#h3N}_EnlN&MWs* z*Fzij8>I$@uPN5k*Hx&JV&k2*;P%(*-;JWuwR*)Ub$$;|H$g{|OMdwmRN#se%f3!e z8?C-wlct)*CSH-8WNrf(GDkl&+K|mxN3)SP>QQ6V)Qpyvv1f|qCP%-etH-Eg_HS>g zmCr#-A}<#spM+)ma64VGyT;T;JMwGy-CzBzZcb^u7y&(;3?u#&yKS7eD=Ot`OtPEm zVdGVT+*(JESN&qQ7u)ydNzpYGsPkd-z2X4K)z$jW@rKBqasb*e6mtUf?Nxo_(Q24} z=KysBzb_oCCWb$c>SxB%-gdoZEOc#-E*NKKI%pjB4ew*0o(1-HvyserNoLqS9sS@q zb&~a(?s1?x6l>ne2f~}$^eqRfsTrVbG`_bqC<_lV9SlAw3*16P10d2wV{o9n8v;m=8B?gH}p`dwG;SJ{qe+az_Fy-tsqNY(4~k0!zx*XgAb 
zVNE^ta}!k`>pA`5MAocXJBO%AaF6dC0w-OjFF7PwgTzR01-JsUSP~bt!@)d5B?K1+Y&4mJIqd8isg56cfVJv?(Lpy;VL9jf}%QTZfw43ASMnf|Vrqz>)0=BCF6 zELH8RQ#F~%|4W}f*{u1p$*lS7`h&^pL?Cj+6a$f8Pw^mf?-T=(*QTibGB(U2H2h+> zsVI%|Ll0AX%d`(Oqgip78BN<^SqQR;I_*Q4ez;luS%;g7iw`#yKRVo0?0kh7;u(nMdg_rXDQo(RBh_9GIRPlr zR6-F+5Z_orkxJcsB=;D%ue0c4kO7jydIaTd8jn=ick0@y>gY%%KW0od1H5jkI!55U zWvWsl{Y*1+88FQ+e*84Q_|j>n_)F8gWr-hUijO)<9Wwe~l`^1>r9OQik_Y-g^dR&B zGb7or-DMmwEb-6qSIaREAssuV8O2u@V*(?OUZPa@I@;L$As+Sm)T7gT;1NA$Iym)) z{@HW{#P*p>3{SBMaq(vUhlbqC<)!1!tKKD8vYze&9GY0fB7$ajd}xJ^Xm}6aKpDcr`G(j+%%= ze?A^bZ>@H|2`ief_y4Ba%iZ8_#zhsf86|YKl*{_pg;mSqVSUq^gtxL-)JY{x@lR|&a2M1`iw?q<>pr2$&-}uvBswQOr#i%h>o|Npsp{w^i$Y) z+zJdS6ZI@HcM=c*YBLD`Z)iSjQsibr{(cNazTlI4Cz51T~pLkfAMX# zxBlUG)T;1ZSGL4YQei9Ha7RnO@2ZGp9i<1ItiE5p9DU#OUW-!_dW3qynSmuItINW7 zzph7}qHeQ3*RP!d-hQs5r>YkqD6gEVPK&jQ6+ysbBrxO-gq`W9L8VX`PE*63G^37+ zP;9yD*0H*C^rNS#>d_{Q35mv3JulTvn{G`<=whAsW5DEOb2uE7nb-L_( z>PP*(ZjtER5ONotONePFfqC|Iszx_|PsOlCKmI)=unYA^-&2F5>m;^fsZN{@oNv*G zoUY~q?!TO_zQu~1_OHsJ`->suW|$;LzPyao^5Pb?e;8xA5%NTcv~N`hM2Pg1q)-Ge z^J1x<`+YQ_zqU;Nf%>);e(x=P;|~$|K7Om^Z$DI1Eb9f`=f~;;$xUhhMD>wel_Pmt zm+E7FqShSHSLjGc{2{_)9!vA=O!bg; zlm5dQ>Y&o`+~mMLB@(i1wY+!+GGg@nmBhmI)kprH`l&waXX>*^+w8ge7e7~zTaV~b zzfdpcN&Y~6*(_D72hL*SzpN+DV(Pc+3ugiBm-QX9K==pr=2>dg!10qgDjgdZS{_dH z67%h>NnBJoe)5?JWuo^Kkvx154mW++nb3kYTA#@}-LLD;Qa?a)oqd*axXgW)8=Io< zI7^*S&~o(!CKjgTY)mWR3EDjy5ZrKlRa8&EKuyv&oUMw(4NLUxXDb)MI#I1RpRG=j zH`9KJ1O1xw^;y4E`-GRDub2K(o!I*!gV!O3;OWcwFYMLaN(Nit_DVhdOjX@7>R0Mf zD}3Fb^|oKr<(fb15$CAO!xyya$Ik&(zWk^D;vCh_UV7>IcEa2MQ=91FzPDcQ^BXlL zynL0O`5XGZ=s)_h-=HHr(xmVFjp`eoU!m9kM*UmH^}xCO&9m)XRWHvk&s7)5^TOZK zy7#Qpul?41Z2981Kso%`m3qqmsaJYjA^t*99b)D(T+4lRGai~^`$m1>@6~~WmzasX zl9w!!or^ogA~botoLlFoG2-(hlu#wQ`SqR{|rM`QB6uanMaty6<4fG} zqy?MmeTvcmpLp;oRKog<4NcSkNJ0Dmpq&PFMp+NWq{v>8&!sP14&9)xgDVW3tw!a+ z9((B1W~+F050K$(Vk;e^FxR3jC&CogvoT!tXnA@z@Uz-lBJ;2<2q5C;sVl25{|hrL zNW0C*?oS{+-=01XwdLBsv}`?3HCy2i|J-ux`N&Lwxn+H$$~V2F?}e&gvR6V%?o$PNwoX6 
zVCYVgyWttWgqs&EyafEeT-RNqrX{`s98QJSabnWoHkn-9B~;?oM$$+xx>sLt3DCW- zW%(s+6YJ)dwU;7LS~s<9X;Rl%;aAVsf4E$Y3SZEuqwCd}md`II$Rqs9eEs7EyuI-J zmKzqRH>_b77|#n!nlBC;W4nmjFb1^qS`Jni)znxN*y$JP=7s9|G$}l-s>;zn(S#RW zNp^2Fv#7sEj25ldTQq3+U{sHY6{^Cp`&ekE8$Mc(T%<;*MeKEMc@MC`J)HwGlHA$Y2UWIVb zro&gOmEi@e^{T7Y$qHLgp0O;z3XD~)z8ugRm8G{ja#8A?MqeId0 zV8Ty@!(q?L)2ugNgH7mdJ>gn4Dm6GVos+vr0^Wb|ykVh}IHyn^4u@zfPX>GfR#*Ct zWAI-jW01)FJTcqZ^Ffl~)*WU(6kKM~NQa2?_N0`k?`wyu`7e=8f|pSEi6KXd!bPG> zk%8fo=3x2Z`WK^6wY@qrg=W3-TJ;MUYyay|IzQ4UUdM5DpOwMs`muDkUZ=hl{_o}b z%jTc739{MU`oSft zx43p+S%Nx#FNuED_!^vVkO{J`OG4o$bkuzA_IV_#uen9_(zo2CDll%`cM}ZaBHeZq z#<}P9-Z!i2`lXe`x)mBDeqp=*R{8>$IjP>-=gMO@9Sr7Q3v%1C zmD?lCYg~+a_N_2pTzN}wRmooeD@;p0l1FI`b;2%|HB^3kt!}#&smWfd`VI6ZKSJnf zp`=ZbEhM)B;VLN{ZXIfJ+LCHke`l#0me){#UFPDYDv>{@;MlT|zJDo;^_c!(DHei8 zUAs)x1EbN)!0?Ooua~LG$p$X+qFwMQiPcRE>;hSSHG^TqPT032F{&X2copfbRlB#T zn09{9qoRoJrJB87kGoABP_VpG3T*NUTyh(92i!|JfB$(5Bcad{oZmlAs>UUeV^Xvc5FY5`l;KI>+fKG%UibJq0Wwk7v0@*X0v)VV&7rbhkc$0Jr3I|T25W5 zR$8T*lAezLkp^zjhy4*rXoddqAJzB5pOxvgeNR?jP8q>@3*L_erKFlL#))3R~hTgj8(LHwO+JJ4e`H2mE`*~s{qZm zGX2Ra#FV9a^lFN))hEj17XACxGQ^haSF7`_@Lf;nfe*m;Tc2n-J?d4?&%-ntn+e3wX`|l-Y z&rzu#uLw}+MHxZ~YI&5nC1<-0H+7h>;0nBGah|vt#=}1IQeJi;O{?q<{ags!Ks$jr?Bdw)rfQQ(^uNJ0r6L zR+$cd%<>ny^d&FC`91zn4QHRyU>d)Kd6?aWrk zT2*DqMj3?kfE~ScZsq>gy9PP+Z(BjsX5HMXM%hp**`xMd5oS|hzuNL~t4J8@7wYf* z2?^uHh5D*LsXtqD^r?@j-?4N4@t8WsxiuOGbxY&WP2&AnaByh7KIC!rS7=-16Kd3c z&xYg>OIo!PzK0Gc*Tbb%JW-v}Bvmfxjo6p!bDvOs3$|T>iZ;JU-~CplDmI5yDY>Q+my#^KRac}mTXtq;vu0V}|rOX@thRjGZG-ue_ak&E=Wr_~Yp zW?K4{r&X`YcCA%1E-*FoIt(d!;c0c`kk<9+?aE!wL;MU`K8S{me5-ah{mp#S!yc%L zTTAp&4^*9OEooWtK-GnowMy^*P}O)I=R8z3AEV%x4^@rsamkkim`2+rR8)15lTnn8 zdbp}CtiRMVAFkRb-2A1!;o++4-tFP)1#(+OgG9=)#x8bsclDQg{lirgpsebVswtSB zPI{#3Xlv=hCAMQ17)ytgghb_Gl6mfts)p$Tzz+> zYCu%9;Fd~#*?%jK(!c+!I>&0!pZ*nN;O8yJK7(m6{Ni>!_gQsT(fZKQs0n$X%a_k$ z;n~)bT&Kz{YkkYe=hU@Uc-8s(+2_H#b?3Jr?;c`>b)!CVgL(^9;jSS)dg-HIP_tO9 zM_*8Vs;+49{5uVKR>aWSkpAceHPQM)Kl-9_^bs$r;v;5raVu%GC9)|D-#{Fe6IKX# 
zSU@7|4J;?({x(L60-xR$V9@eD3<9&o5W&VUjj9A!o}fWT*{A*<%yXar1nGa2r2LEy zxAxatUQ~TjG%EK8Mh8bmjEP9kkSb&AaMcWn7?CYgxPl8?g+zqFx1?pnMnpiY0HXi% znr@)E1qd@^BA%2e3sPV|DEIORon`QbsT@X0?#kiQUp@8DY5z8zZ_r$+rZ4`R>S3Qi ze>4*{6uH2DMBn!}6|b37Abps@gv^b+vSZsqdh_4ZkIFXu2Yjt_TX@(_`qYgWuomfa zH^RODq?c?|Klc2X<$p);I8TrGyHZ7809~^rdfMMrk|0263=Gf)T?wDFS%1_Nv051p zg%I5jmcQEJ!Qr?TnqJ`@v`_W#fTn{L#pm(;+D2iGq$B7Y*xWwKaT z?t4j%n8@lt-)UTx*W~mzp64lSDh= zIu#V6%fj)n2xo9YN#&gvnGi=nU?sp+$u{l-1Bm-`kf0b!V^o@%J_j1IKOoP@?-F|X ztLjfFF|83TNLTJwei8vBKR5HU5=`Q!SOthomY;I%JMm@mQv@OW4f0cL1RJ=8!~6w_ zhzw$=j5ux{;R7iG~7%2%^5EyO4lj+ zUPfa=+I~kv$goIO;fb3dHm&>!uJd6+h;Q^n3j$k-YoL?#Uu*e`zeg~{#qy&q7@I(3 zBVSE-zZetVEWh){gx}!lhEn6Ko2C3#Den%Id&a{iUe=t@1#N3>?#Bz`io~Po_mwoueR-1Y0$e4!RbTeH8k^c_dzsC3oa+v! z3LT7y=fcJs75OGYIXH#qJmgw&d#Swz?R>cPV7{5B6;CmeRU#3YS|X$GYL3B}VA|j$ z8i7yn=JC_tFx!E%Pw=+!(+eV)0vYbRV$bT`e+P1;qCkut#PG*G9Yj_%iDm!y&`|jkn8C8S`BdBc5cehLcEY;a^}Nt-Gt%_}AZdr6P<^7T~e*Io&{UK~v@0E7tr&9_wFB%t$w zCNeh-069Nj0}T!aUi$9CE0w%ViF2z zXOr5$YC&LuF2z9~awsjn&`3s8xw_RG5b1Dvj}>&rh-kMZ~>x2R%>zjli{gTIPC#3=i&KJr6?176psZdKK~^w|HXlgr;Va=j>H`7`Z*>xcitA=LNvfB&O?=v7MC%S3T0aBuJxm(}{R zt?HXakA_55j=0b054NhC2ur-;V>L3oc!~b=$7*nR(?b2>$EvU0hWue@T%*KNZ(OMR zeS(kVRej|r9C>|Jzx#>0oe(wsDMp4Z`uCXLh$q5{}W`-0g&7Y-!oHU9Aq|_`l~%bu>zE zOW1P0VbRDp!_HWeeO?xJF7NRS<{=s6wrT@p){2GL;@DgCff45i6kZZ>)>k)ez0l}_ z@$k_>ScU!ILgmbEDb05_gb9xRXMyt_)Qju&_(JDk>ni>GLTB}uOXd>!dZ|;Q1UG~h zM%<)(;}sW}msxhF=if$;-`icn13ms{Mb4>s6`n0}9GvTK7CC*aM|FO&lPH@fp`&}d z8%(*DeT$uX%UafQOo=lI(6#7NXSv1HuK&0YgQK3+$2qp8rQF$@IO?Yso6<6-{xaanHwmmx|;#RrunpgUvm@_Q*>|6e*oZ};7TdERBff0mqh;4pJ zhUPo(b4HqU+dgAP1i4oDAVAtr+4G!|Z)mHnXSKei#@VZU^JdI6{V~&sWy97@ch?Uf z9I55*`YAPk;8rSgdtAPe7Ho7gm&kqX+*3bvzuP$$M~(iY!}|#)i^)7{YzsWl5@r{R z+2_EdKv~FLle|b;+5dB`Qv%$Jg|EY)bf8h^SYqzIU-79@0Qxsm1;K!peQk(c) zx8Z($R!?Wim_`B0C0Wba+r4uosy%C*wVbu?_B&9L|HD7_<$7wJ(;s``nRU*+;m0o0 z1A93W3ZK0QE?9*H@5XcTCg{6*IR`}>2>q~|^+&y&{j9hiSnnip79COVjOf+Ky_w=F zD1f12_m=Ca`w71+bQ_lIh4oJF+HV=E7tixtFk%8DW-0_4udR0mT2Jb&_0C0=eMg5y 
znnA>!F3s3&x~Vq^^v{+%dpr5j)T&QCYz0?+Y(;;5>P42Z#C=E}xATvEMTe>0Cm1=* zoiD-69j&SV_P(|F--4L%n0tdj29*1IZ<2k5MfGp0hTYuEwIV;w>3P_ix<%wb{#PvA?oPObFR_W^pICq6N ztkFjgbe<_3>NPM(|8%HRrzZ?@>a0Qfq(M$o?2ZPSC`&gXPWeIl%Rx@R$O8@Y^nk%m z|6@k*PW&$574|w@fd)tRA00-u@l1jL=E~1mZj}fD7^o2;q#5oyfISeD@m)E$Fep4d zVU8>e3!S2`8SGphzVF_a;X|C0t;q8Yb9K{R&eJ8opcal0I$5B9IMi7fUizT^VyN>{ z>KuPjypZxdyQwsN`*B%esiZIz;52}|ZX;exxqG1j_J`pV_Ta0dui~a!rSx3I+MpCJE^#g3bINf-+)pkp*H@TXqlOJvsETHc17%~ z3X)u!1Jp#KE3_}upN(+pzCjry!-%%#+i+m^<`?D&!qCQsurPoT8OIoKxgr4ZaSigq zN8+mxJ`eRVQqTiBG2FQ({PY+4;o;7+@l`Uq_GyKV3!+pX)Z<4v%g23C5PaX?g!=IvG>h6F zNEX4!EOGB4To|*jn1G7}*N`ae&&qYb(az8?&tpeBCx<^T*Y}KeX4MBrdDGppMnz_A zO6>dfgfR{gzUBJ+W1RED+spNPW1Lo^VeZ`DxdeywK?gV|S^v{lAK=vYBtWGoxb7Xv zm^I)e6bpB`e(nHgzO_}KFxF|rp8wCWPG27RBMoSrMJg9-?#p%k2(RjmEcjYI|wdzwLa}2YF({U2RXI0xac6~lEQ_h z@uK0@_I$l%oTK!_gPp(drFgvQ>#Fh2_u#+V$2)bS1M@qHj115lWEx&{_@nJh(O7_| zK{_^J%Hi6P@@1l93`fN)mY4->MyNE_Okj@3czyJDCRwYxb?dnfQ-e8{ky z=p@8}a0(I8gz$#lhVU#D0Gi~7TllMZ{+Z@FS^mzOY&j2&eHV#x;?;3vuSS)p(DoB?LQ+TYeV*PsZGi zf8LsvER?^v2!)NF_j?aGr!pB)07J;`EAl7G_`<&;nr_AXl4LRDVY@XY@Atg6(yxJv z7;b+|Juw+ey`xr?Buh@D)kvH}xkPowvHphKUNSjCiN=N`%@DUmNrA98)+jk}Ww<>` z#)fK{A)*k*bn#_id_Ut*XH3yt;T%W-{m50=E2bYg)Tthljz>3+utM9opo;k@mARVW~5w?9w-YrTLl+Xj`n)LgVpo#Bldoqtl z^gffFDLl@d>+1X;-^I|K6`;|eofJjEGYYe^6Dz<+Kjy>*h=f3n*B)GU-*g+VY2XrO6Xul(z~j&Rm^tuHMy z2d4@-n4>evfy;qYozX17$y1#ZhTqAt;bCwm zc2&rSgfs)~-X^CQ#jaQ2E`nju%m~7^-<%4e{8$%Fb9zBN2TXJJ4sR>h-<;Rh;3k4B54fUf(B9!E zZn`4zWe@$I6P@w>=M*HWgo7IbRm+^>-omr>1u%sW5Q3||^&MwU`2FIRm%rmwSk_be z-IJW3L!-a*T?6y;zKh0ng>L<>Q=OjdE8hjV=IJlK>#UHtwiPEk-;uDV;!~jJbM;S8 zac(5kr{Yv#_F~Jvr#krEKhzJO=A22m!^rP}W^?p0-*YC=rM}{O&NWEa!%uf+(eaI^ zJ2lp7z4~-IzD2)&y7L|y)N|i=j)rh2et=pvSI_x@bDP(AO_{+J5TznsX}{Pq{fEw# zen)$2^<(Gse%B|$%9l@zXC|X;N1@|z3eT-8taypO=EqL&6owWYx^f%b3E>IyA4P;J zmf1@rMF(Obp-~uKrGBzfNN=(Vt91jxkAg^Td2W$hYG(?$d1n%-8?O}HY0eWPif5Xa zD|Y4f_Y$IhqpmslB@XHc$wN{cii0{d_EXGB1aHm?VHS!HhOtpIPpX_>JcK^GZ!xTD^6nbk9&VK z6pQH4OGe@jJ!6JDlw9r-2eH^(+g%_KQ1JpWl2b+={pbN`MBQaYY5QwYfuV73bCG^z 
zrqlDtEgo2MKt*!eWI;s|2q$~8-oV8N(Ux`)Z7I_I&Tx_`lmvMx6ZYT+oC5gBEGL29 z!ia~uVYUdtGli8QV}> zb?=-_v~)1m3^om8_sV!B9;Z5qBV={h`J{&^0v+E!!>Mt_rP3q=mIHpmNI2prAqy10 zmf|v4W+WG<*4Tdzi>ViYHxzRTmdb{3|=Hg;+UYVEF1r_-vZ2}%QhSii)F;fA$Jr_lv}#3lwsrsTO~z` z4-Fx5vJPTB5xK9Lqs1@!6G-+n_Lzo-33C&5@t)=uo+2|nx6jxR37`D-0ecUu+WalI z5Z)|L;5Qwos}zxa0g-c4Y!gEN%wT}r8v;c zwZkNso&9sC&t9E2jYmoVfJwhyM~i@pf7vci|J)gTVv)GO4H;bwPRQp%d@YLvxL_nk zwR4dPjN}Ni6#_5GOVINT72%XMl=j)aIs3V3 zEh0&O2?=3-iwluLcrGI}1}Ro#q70-qs!s+|i~Pvg^oPH$jHkeWI#xN)LuA7)QHQG-2YvXc;l{IpkQ0y|BY%TB?JjLM26XiF; zz)d4f1?`PU2yi?15E|`nq%|QH-X_8DVm|`-hg+`{fW-z78w&&=Zku0FL}sit_I+$H zkliolZYr3OG7T|A@E-G#{iz`&o4pd%pm3PrCBQi-ys025URfh&)WX70+dvA_L8*je z(eQSuhT1+Xv`sE46YLRSxJ0NjY35l41sUtUj&n*LVi|5#2lX_h$K*GKUfZ3 z5zk6IW_oIp@`PY|JzYzZ4dH0RLW_jD$`zv0SCfA9Oeb+j8)lpYna4`NgoGq?VLL() z#r#rewxj9Fee+`ztM^5eo@ApLEzB38bpNxQah9V`JIfhiRqM;oa`vwvD-R+RyH8?| zxCH(0XE}8wisVl+y7&^4CxNif5*Wygkip@>L?j|(6mdcRAdwhX$0d4j4PALWCZR0)ocIX5oWHp}fbFZjP86==_q$ ziQBJ-*nrI%2!F7-S-g^%1{x_O0{B zWAdf}OanBkL1ElOI3xq6mq6D`6gkDtVZpMbgK zztr#l(y2)`Kn#R2;q*5sN|=CQ93&tGzs!Ljj;jDLL@;4b1b;}#IM{w|9{KfTKvHZG zp4`RKl$T=x;9=EczNV{XNH!s{n1;Mme8@`0QmHvIdT-$uBDI1R<$|u9lrj-y-j$c^ zFjUJL@E&Ht%=JCLat5W8<p2FR#gKkt~8dhPsY>;?jcqp z#X-x$N%CBW!V|)*&#=%7Btm2*Byfoq%Y7oUHLD3>o}?NxDH9Fh+7u?|I0F}PqCrq# zn~cJ3h#W#AY_x!IWsoiwD+1|~Eff{nWsuf%O@LNL*9-K>bDUUplLRUi`^2j;axnWo zL|TNG&T*3cIi1fD)fwnr5o0$tgv;+LAYD4~8>hayXoZCH%$*pYAFrD?4_uSo+oX^C zjq{xm|5P|-?&hBe_Ri`n}&YpCs;r5YdSD>0W7H?A+i4v9Ks~4AhO-Un+g_>Z*}w zz)h>GhF4{u0>^GKM?bf^sve`j2dk?x53!l!+W9G>7$i>+86nL!+TWkqgqidacyxO5R*XxsCwCcaZ(wSSd59L7_QHgTdH+bLva#GnorTfBkga?F-%2+B`Y9fblmPGXIgJVt#PJjmmWW)huP%7dM>D;^d;H_B(i#9X-*BeQiZrFnH2OQmss z(490MG}|Qgn$pc?G)M>+^WsAZ{gI1nOC*^{$0Y4TpP`hYM^JR!_67*h^LRppP|{Pi zMQ|cfjQp?>md&Kt+9p6qWCBA#0lUeZu!1sQmLf9JDgD4 zB8V6~w}^f>T(FWzz2kVGjZF9cn#u#&;;>vOCkM%PfO6~rgL1<{tFSQ*BZFBsu8C8T zWt5HrcyLn<4T=lFpi{C?ZbJgrgD8qRoJNO16yv~D*2hID7^+y~4PFQycEF2~G{i<_ z5IaK)O6?piuJuP2m=os_Fi zu#W$gIsvZHs;uLQSmnHhB+U@cPY|ACs*nb8>f`k`g0UEN*a2(U0fy;U#~qA1`yfM0 
z-PTCb7@CUZ55jhts*w0Xy{@9D_5{vfDgFXJ%Oa3yygVpF=-%=RjmFRxdh%W%#&dUe z#JBF04%=gb#rEldKpb=v#EJ6^FhjD>7p;gFtbj?*OUmAc>?Fey)o2O^AVh!VFb*dL zO~E8Vu$1u-$dHr1X>u}RM`+VGlbTSRmAgTsKNJnu>Vix0joO6`*uPuY5=+45jC)UY7j7>oa<>e zmJcbjtzPJ;fz=!aH|OyD7B`+MR0?Ha<-}BA2{*`)N?tAO+qj5yW^caLgL)tx_>a-A{=Q z@E+vDJ2PLWa90j5n+?@y<$`=60}zc|4j&n6sxLk`R@_mZ%Vw^ubqCs*#iVFagfC@Q zpb_0=r_2=@4SQoZnJYNZg$1J2E*2UeMDa#bFK0BQ7x<5Mdn(IHF$&wRaDs(bH!@bnYm)w=?ZgYWAC9h zKmc0@1j@BjD#47emboGo=0=+==1QsEV6H8~Co2uk2y^Y7x1KApu}`*&il z)ak%n`}@26X4%!C?_znf%Oz*8G0CyZx#qp2uI6p?oVr@}-iEGpSemRoB3psI?Fr5j zIT4yKW7p9Xt-i|o@3!P)kvo~ur+94YHmYiQ`Z%f>E~Q85uB@H$i-ToteW0Qa3}I@Gt>k-(Uu=rOqgUg*gXG z;JgGSm})jk_!v>h$OR>$1|{?d|4;=#VX_%YHzIQv5JG}_F)SBhQ{t?8f#+#J$CZeo zU~}%?-W=Ew3o@2=Z0^0ucJ9L7q*T}U=0=SGK6cOd=HQMCk#ldJuqS(SU}**{tE)qtGM-vwmfBaowo>>eYwUkxMxxqUJ)IwpPlDoaG8fe?W6Cd1kjSX^2B+$))-6jI6v06Vp~ zwvS(KQV~VzIANqUI;ZE^u}Jl49Wlt5r@g?i zsH{$gbcg!Q+V9#cmtIv_mb4u`%PS{#RwHg^4(NXmlOl-mn`HvuoQh;J)9I52uXte6$eP@x(`Uh>fHz2OQEMoW=tv#~I(AkW zcS;%fW`g%lsWc#s;gslrtXL)Ykp44?4#=7duCcDc)w^mkO$6HE%Hnkau9Pxp+da6h zZv{lLG>ED@z%@0itU3o=-TSLx9SN_2p$xKhRPu~n4l#KGY$^5pN$@0p!dMl|%x-!f`&{5s z37IvNsD?CmhU>@@WlgeK>V(rv7-GU+B*Y6|&L*O=f;uwwlnE$VDU~uD3MTA_j&f8A zSo%{^r=w$lsA2|$VF-TSS^X~(6~vsR2rNQ_p{Kp#b?E>7I*NtT5?*&e6QWR&XA$a^ z%OX_QMFT)#B2+4}r}*cK?D3=FEnw>F7^irCdR1y!Fw%&op zyTanYy&KO(PTIvWS`ukYP*8G?`Y}Z!OGZpgQ6?=HTalb@HHG}oBx2eDss?dD3D!L) z4bO7yQfe10lA&}O_p7PdL|_E?6YDUEbes^uV@%Yz?}J_UvD6HVNU`>rd2*;9@{>_B zz`q;jv`2kRpH)VY4k9I?(1tk8!=fi<+Xi_;thW0^YFkd?RJh|+&Gtkf8h79jxjYeN ztQLOWU3ntn6l3xMX_EM!U54<0R9#q7O6_7vv&LAHVYFWydpSQ(>JbRZ^Xib9$tisKQf(~+2*n5g%uN$*_LBzFi;?|41Lga}8f?83{K z>Ya<2)OU%PlnQ|pY+B%BSCfxj7t0eCNJ^eG(a4|~9(MWeY~6Tpth}RnIUC6!r(HlY z-<3_Eu7UK9;>3heqn~LlUQDI3&<1wpv z0wj}gGZLO0;D74nF{|1PW@YfM)C-2~4zps{bwIjYM85)T+9k6}QfN1rRqrk#9i?`G zbh*a;Vd^{>M@5=Gk%%vgw@hmKsE6!rB#*3E zQ{vtO`%t1&P})lN1hhuB#~u7^k4anRQyp@=8mhBco=8mB$`d7QQ4T6pC`lD}D7si3 zBh$Ku4WhO~%F}yyN0Dg~XGkD6CpW|o*saL4uL>uylo$cVx14y9X%9%WSfJ!f&}lH0 zATo`&vInzx-Lq0Ju$q+P1NKlM9hsI25IMXE!?S{|20WUU|c7jspP 
z#q`#4jjv3#q8s^P?n1~qttB#x2>=&FTE&r>^Fc&E0Nk+JSA35yf#CEhq8gHSd(do_ zM|WE~XqHhy3}MkFG%u7iBWiXl*<%f<^F6x8eV1a}X1MRzqzL%CjF~kW1qZ{5>_)SF z_VZ{V$ypdFXTl6E^ga#sXyKraw9qGovaxreeg=YjX9k>$rif;lO-6DE?h^7BP>f8$ zo#BWKRG4Lb2?TdB!R}gcr_^rH!Ut0KqwojXqGXfI)IQR*Q1G!MEo_yVL7X7eE3J}b z(ZY*emq;y$*cv(FCr&Fl#>DbI8Rj=XbI4V)@4C7P zSW<5j*cY^;zd_wSK>vDQ7T6N0-WB@)V7Jg;M1f(Ur#Pt9Q+_;cV8M~{&aqV8fdy^( zu(+@t7Dk8P16Z(A&7yVz7Q06ZBrr*2Y;u4bL?NL#=a^tBr2LdnA9Cz)h|ByC?18vg zUV@{1lf&HOq|!j!UeW@8lV{wt#96zq3Zep8NbE&lEnL4=5^Dm!wg$3>Kxm5(pv^vj zHti5VyY{biVn^7;yC7&58JR&2F?!lfI7@*EGy-w4L8N@qLTr3Exw;ng&MxQziKWyo zNNi5g;`uib5sK16qbPvER01`cx9PD4^ZAF=3*2m&puFklsuPgbi?h_|@if~(v?!D) z0Q*--a)cWj?_`dX=RkN*P0i`nN$aNphs3PiVG*lmcUPG>nkr$GL)K1rv54zjWGS#A$a}om}!uXFrD69^20e zWvx@7r*!mVd){j!%e1dp;Kvp}PR#DzIEE&ll z5`RP<))jphHf^9+XO+rTXYlT%DtpnHf2ifViR^i zNlztPzYb6b79Ii5wloU}XdrJp(ar5ruQp9@3C;DFHEn*(s1q#f!sjxVHARsQwqyx_ zbQfeTYm>jM7t0fAwK39hS(9?*kWo+Ija;1}XIPS6=qd87e}ztmlgScYno1u{PI9$+ zD0hsh*#dq{r`Z9$%`!GJL~pgDJ6r80#zP*LtRlN4xnI?4GwSxy2j=EQvIM=&vdQ@b z(NKfQ1nVte^Lctel2%E@V7*D)&|$qZ>ICckeAaqX!YsF8mk{0*NfA0buX4w(7GO^u zQHsBrDxJ;E>Cn}58ZdeaR9`RgSIA2P#;}c<&}hLa;jPH^o?e`NbksX$3fBr5KNLE! 
z&=R@k$vgaRP)<(I?*h+!Mj;3`sf?4dne-%;b-I%kyqRMmop4gBe}1kErgBiAj9jV-go!r=^pdN;-If4V)wh zoJr&!6QVUhh=&cFev}hVT@3>#vz`dRUJ{_mWtAPtc6W0};4IJFJ&@K8{4DV8`lKmI zic{f26bq=BFl&i}Pk-1YHO8;sh5bjVo$tTwv4$tGULvk<6gkIp%Xri{O;yt(u%W6Q z1giI?QoylA1J4$lGwKACSU*FZK%k;Tw!#83OzVU|xi^|5KOa#l`LA+O;_Q9q5!(!6 zEY1{+_2MmpXDBtM~|392QcLvQ_g+XMS6rsqpM@` zmX1kuWlVblC$X$|5uH3ZF^R5>X;0uJ2GDMeiAj9jV^qE5-5V2=_`1hLPPeX($-^fm(Umdn z2|i)@!R=u3)pc)doRsv(mgzd`s2d!)2V;|qfDLLb&hsuhC`>thNl!O4<%}#%r>6_K zTQ1f=Nja0lJnv3%hZWDaQlx z`>s;d0-8HvoP}vxjL3+S&3fuQE_XUY&z|Qb^H-d^)E%#Ho9CR6yxUwQU5aYSy&j~` zm6NR@ci++BT2y9^9ou*7zUMhpQh%_*c_~ZM=W(-je!mccG51qUDTA`3`CpbvE^*lk40(`)&qcJeFhgwxZ}BqgOf3l74w%e6FqCb zqrh}7-qD-pJLg(Y=+n=4mdYJurH#%(HBUr204ta7&?o)`H^PN?Vf)^eQyZN~n42fQ ze<4=|Jgn!-;{mL!N9Mt zYdn4XLTnSQkS2{Xt4B<;`-$4qUW`qOQpa(14y3-BPILOKK>Eus#&&X_L_ACYf}M$P zre32FyXZPaXuHpg8b}Rh*h?R7Ur5Y^^eEWo?HZ# zD5dMUX0_&(SrxWmNrV`s;Rx-*9xvH(J`6}XTnP-1IX$cqvnLvGPC1Sq9Anl@_{NQl zpgax>yrBzzCd{XiW6Z4NZ`}S4pq%aYN`+yxmQnE-%-J_|7UlJRV<~a0`J6ZeK7u&MGxAR;;fRU^biNYo4Vy2{mhW;%G4V|gn-mLXQn`w%38pP^<) z^+A>W+N_?lTUYo9Wb9WmH;lusDw{4Zks;cZ$PiR%Y;vmTI+m89zzesUXi2BS@#ZPS zWc+G^i9nbafovY?J<&{vLQpWBeQ#RZFVV{rvGNWVjGl-%SCNHoB)}JEjS%YpNqSm} zLNLuZfoR1TmCxU(6$yBZF8D0Zd`d_BjflwrH>L!H32-T-j+4#XwVm|IRwLb^TRZ9N z$>!UL-H|=T?A-EWmloO=1a2+H9;%26EQyG9h6QJFAoX`M!tGxMvobO&TVrSx#Wisx zQppsvW>h$P&%|~5m~9$qhm%t%Wvcmo7Fr5n=sEEp1w+H^dYCZn4C%>aM}bwJI1O99G9@gZQ3v;%V=)OWA(1)2sf>?rt8DBBUbVI*Z&d>j2M#Gm8 zN-wX5D%JHp9HrvZ;ybzurz!rnLfJJi`l*_9Ll3mkwK9X9g3magsrut#Z^gi09Mj}( zK~4KxYiJCbQt;;(e+$r7LaYqxmv7ca1nB=A06BED?O-|3?GUWq|DrHt+7PQYR|82h z#LDC+XJ3AdB^kFeDi_z5vp9E z-B2gy1PL5fPr>!=09*$$OSD-wBMawR%L^o`XE1vNlR( ztmjld8p3tJhbx~#1dYSTOnk&-+jE{Af^uGs&%NP;H-uxDFeYH#6?`$%Ow%*M~M*=9W^ z=ReLiJEZm-s~Fx49F?o&cnvkCB_(v*r{=@jD4O!A*+wg(eV>}e2}J>V1`dn<>&71X z<}&t~^HAjdsl)ji_ zHjWPD0nwaEC+C<`-Ba=u_k$}VI8+|Aqjs=!Oc)Q2{!YG1sikox6LknSDc_KAsCU zI+Pa7HB&rm_Jh|4ERY;XA+{$lXB{1#YnpoD2}=6Ptfp^0NJ(Epge|77UqWC7nBM&o z;w`{5=S#Es{{u`{zO=#QnFsnllv>R*o8Hi&_vV=+yo)M 
z!*KgTv&juGj9z3;^scvI$RflcS0xPnXt9L8Uu4$k8U#Zd=XL^&(N)3dw!!H7e}l0X z=ApvIIe@VmJ+atyxwc*~oNFf0;KinKLw9E{M&zuaRJ8b?0T}+zNZ9Htvu&2Y z`>ebGRk!+e^FmU8}AHhjyzG8?u0pO><4f)RefUrjqy0}tF-K{E;v!)z!$ zyToj8!yu+FL5#`?RBNfZUN6g|GfNS4Z3#tujreZGl>N0SYE_~pv<|*e$I3@W*o~;! z<7<0*kFGGQMGaxOYKScpE&AGQctfLSzBcRVL!&8frJ3Zg*6gQID@~cSE-G0Gv_8rA zepJD$1`BxgN_znhTxQ-}rz-3e{FWtx4*Ex33B1$G%vOw?wF=Dk|NnuvzQAmD!#Kj1 zo9_d71DBg?yk{71LG^&QuOtUL(~kiGMu@fl^a``)BmT}~k{E9t)-jAQH3)D2M;QBQ zrGKapJsp106L8OBVhM0d%3NuBZs;puo1Uw{atya6L|Jl2gohFsRzQPTG&8`)|w6C-o_{PO6#j9I+Xff3pdl* zlN+ohAyI7V;hWyTOlLV5>`o0Bu8ieek*S|Ac_ATR7c9|AxEQ!FnWpq$|n-k~`-tz-5Orccz^1vs& zFABW)gxen0kiaLT_Y1tRp=}ZCu2Cii?Ty#0@2;|dW_^nvW4swPo=V?|?d-F`DO+5x zcVnN^At~Lq9^plY(}?v5$9npx%DrG+N^^XcnR`(YqV5q*u_vjR?EGf);Hu>rsPEh;1DFX|!43Qc*SF za?K}ogJl&^mqJLws|!9VG}~+Xw=QaQ#5C#PX0rj**vKvB5>&E!3l{&kbZ84?-_?}7 z)odC2$e~mL%}O#h(x~lbXOt@6u3Ejm-K^w#yPTf!Em(3<%5DlnQ!1yb_Kn& z%iJ3?+>CPD^_>kdhSL+fLG|a;m%GiBlr?_rL0E7{I}C^6=MBvagSqwSZu4pJt%7s? zOh>c}2WG$l5Z4LQ`sf~@>_)n4pV@$l_aG+eN@~B?{5O8`_X5PrXzN}CimssJdld{C zm6^5CvBVHA&;Vw*$ zW>o{iHadhrvm5CN`)Bwe=#;;^3zi=;-{83D*+DVETha&&hoT`X5f z-r}fv9>&1BKbYP00nucgfZQoLu^%0+0|Jl z-a`w21Th;x`+fwy8$iGR2tJ%UlhTf1cmt^2F*CXO7`rr7_v`LV+u3EbBUpy?Vk*2n ztUx|XdlOk{%rV$O2hc*)5g(L>bLuMjz%et`9r(@#vftFWK*n)M$=DDXbHcopf9wAQ zRO(N6{A50o^7)QoY@BZ3BeT9+PA~xsj;5mvt0n34cF>-m%oR75FQ_xawZm9qv|AR(W zn9pHlT&OUcfVyU#2dh{^51$8{nL;0&$Gm<{pPx5p=wHpJyMG1gy+Ff%HG3tkS_u<( zyfF|r6KxAHf_2yz+7f>=Z_-Ow(mlVKGg1F9znT5Cwe;!*P?2>s=>iDGcT{`<+WA^K zZ+}evU488QI|xHz!M@+k)sC=wVDp=4%3o%>YYT!0;%zj2_m{a$&MBVA!ZmDC_F1sy z?0q@Em_}bRQ(bG3i0Jk+>6fcmBU>(+IcZyYi-hekkjoId3nD~p533PEd2I`UfnHw5 zK(|rnHbPX3W;-G-JE5)ZxJhK?xsjcrj6b195q)^g+(-9ZF-Q11cks&px^u->!RW?t zud<@hHrKd30?lV!13|Exr^m0EF^Rzqt2450qo@E48(KXD5p~1qLU^VNG!~Uqgd=>E z!wRwg839PGxNa`f=M+!qEfVzn;t7L9A&BICn&|3Wehl+ggBt0gHto`c2Q$=L7m1|l zqON}E7&Xwvt@gJk6TaMp%^1z70|0^$I=zubG5x5ET0oabhiIW+J4UxT#BIoaOSq^; z^TI`3oK+4E##yICv(SFbpef;^I-PZhJ2BzSLd7jw3B4JLMoMWVzpbT9_QzIX>f?uD 
z;#F-2{Sqcx!knJv6y5c+mua|DEV0{pP}@Zl!o}dE@A8RHF#`jA^aOS*Q0sm(=_Z$G zssETyywJ+i5345ftE&yOl>snjS#gG6dobUwHm{&=u9vIS{E(qQ=3T9 zx^@ss<;8)0SXWj6sQky+ttMznTxAJ{3LS_PQOOrVfO9~vXybCIu}5Vb=*&bY-4MHO zbn!i{EzOA%_1t?^^1-mmdXS3kFSm#kjo?Tfb6*x_j8pv^e?zV~MqI<#7K`v!ygB3V zJJ*n+R1qJ7bA(J(q-sk9o*j3KI-2%LL2Qh8Tf_1n5-U>E>tQ`@wtItUcDV1sP3sxL zzb0eF@+9Ay%v8>S)CtYC$cI%~FwPLBrcI?U;>6wBmVyg$qM@ee6;sUwu`0AI7E#K& z*A_J?GEv;4f4PYsP82oJ@cW74K5ZXuOBBz#_N%s+I%!cHP_~;X_QvO9l_M}5YmU`f zIhj*TcL_1nrjbcl{riP@G^G)T=`%9Zp-!njhr@6B0ae*CDi`8TA}KnBA5>q?s4tFm zYZo>5hy$)e>g&aDyHRhtaM4{!qJHRM?y=TD)OCbo)_Nt0Y$|UoLa8iC9Kw2|f@E2$cdh)Gz=HJiRg zu_~H_H&SVi7)llO0H53%BF@*`<4PsA)<$!4Kt|P^339ilhL{w(CB)iF|E?)Yd9*pT z>~`nZ5;g4ktA?)ct0gvS<0-$k@PzZ|;<;>Xu|{h|eY38-EFBbOA^+jfkTG(;^g-z+%Vg;>D5n-{Kw=Ndu z(O5)+<26GW*w-VB#cT}+IVR_b zL)ticr;%70g|%qc3y`6^Q>mLJ!Wea8ZHiwQt8f$+i7fqIo7H#NREADhm!P@B9R`HcFZN(>Gh4pR`os)ju2?*6jk6NAFf4{WRDP_;P^;mgjyJCLy-$u>fl~m4HvSVKk^S8#|%1zVv4} z{bbe7CfyfEr$_Fq?9fYr?{)mWUlaJ0O4oz3#@`>PBB2{M!TzLzy6|jJ`jecFaHhfO z%PZ6A;{BCfi0T}um+t9Y`F%_mkr1ad3?BExz~u+rw4i)Isld=5<91=j3vO3oll+&c ztDU9x{{opiOa1;O>XG#?VA<+_i6r~!bl^!m;Q!V>@bpUHX;R>6W8mr6z*Bk`A@yw` z)b3v(bN6)dSM*lkX%;eQHSP3be$KL=FLS}68o!uS=RqN~)^N^C|R!|PxXf(;vJ`!4~shbZr?cHei(hP@V$JB7cHB{JPZJo z@^eG``6`|hPJWHbeLXF5^nrc%#GM61N$v2sfTHPpD`5%C-go*9pd#?HS!0z=BC z&hLvHaz8FoKye%ON2gajDw5Nufmb#N84(8}34}s4cpy4AHzZDxF|;#2Y0f}Y;9;RztZ`C3m&KO1Q_dYWIZ7si9ZypqmvD_iwQpRIDqCoA-u@E{Rx%%4ClJ=zsPkL zl|P9!@l;ol5L2dNjNim7FVr|;)3}^&B1{`X&ALH#`kfx>hIO}{KIkU;IP=G%-(H60 zJ?QtX-LcsIpjWz!Hu~W|X z0a^$rt2wZKRZ104c+}H?z!Ylpv`D|>A~3W;$j(@1X&Pd$=~mv7D)sYm4($HK_b~nn zw8@(%cBQ|=vI6mFKh1brn65wkZ>ygc*=BGb19g!0j7VtmD`!Mke5^!S;jtlvVCUel zmwZPxAx6gYxc+4JcbV>ZMl^6Y8+#Bp0I9OW zH!7guW8pB{2l^K@M*emZJ>Z^Rqeq_=>28~E`qs_BXGNnP|2W7%B`gC3RRW!e#RE*3 z=)tfyZ0P?gDxrTv$L$If^r_l&BC~1_IzA^F)Z`Iyl?m{S>u1e8?^Rl+>CfHR{msvb zG}q6}*BK{%c}}D{Z^q;GSTdd$Y4QCKSz6&kphwjIOOXjz2v#Aad(~ht%F#Q|izcCr zBPt(Fe_lkm1KLSfEhM8l%}AR3jS^ zhuXClq20dqg1AZhljgsGMe`T`+(GSL6fW%=Ki@(_UKH^#VorWh46Cy}gu}3NIBpz| 
zc3b-G(LQil2Vj#0!YgtYY7C%GJ=B_4L~~dV5uK37s(_#9^WA08&$&HB<7yvq6+^<9 zwBZyoQ+=dDWKvqfh%lSn$MuAw`j@@@t)8L{jEx`m6tC--La5sW;iY$82Rm3n=gUP_ zT&}7MO9|)Xc;D_gJ@u0KICCH*PTpaSM%x+czRF2oqpEAHr9`|e>LvR+>SEbVUFP?7 zWl9)xrzCp*Wl=Lx^%EQF^n(@UKOM3 z_{SStIbM|!RLQ56DF~mzmhQ_!)^4OkrQjU#sPP6EjCa}``nO&p>z`rJuy-&Fy7msh zz*n<9!GOW)-lCZeEWm)S;LTQ({r0aBWW6CIJ^WfFKnB020J8oyF%AT)>+7PK9|XDK zH-ez>by3q6!z;Nt+PU^RI?kaAv-G@hcu4Tm&DnNqo%?|Df-tEBdZmwO3?>J`t)>h{ zUxmC1c&qGU7f=itBi{fo7(=b!5LxbFs*OUsqF3Kg70rA@w5=2#PQD@Pxm2}`Eh%q` zrY>L$>p3=_Cp1vhjYnpUk}&T!^U)l{{&MB~tLn(;9` z$WMDh@HG4_Q9UNZ$_xy8p36qM9dD`b+v?6+pk!n0!P&_oi~F$_Ry)ngV8AW6socG9 ztCsq{EmHKn#Wdw@@lwpG5Not%(;25Rh{QXhUd(B1-D}oZrak=azIVhu#)%M1MTK=) zd5iN5e4UG&#>o(C6u<_w^L;)3ju-`w-RE8C?F%kK&8$g_x{EpZ^yGV@xi*;SJt*b7 z=<<7z1cp%h`=WuqZY*_tABMa?XzKgoBm8810QP_8B0c$mV*g{-0;{TjD6&xe-Va5i zsK!vGY(jC0hI}ZRIBk_u< z^s%C@vz%p*3|o%-aIA1rs#BV19}OnZnZ6VO|W)>M(GE=3o5nnn2 zHj`6y8s+rkv2=7I_+j0VLbeNPi-k58AgZ>d1c5X0=u1GT1Dyf*lvH*cmu$q-)q`qr z2Wt7m*JGT)W~viDN_fMu?LNYAM^foXkq}&p$%ozc4W*tzsn8iHrIx^_qeNnGvB>{a z>>wB8#!##oT_1&+>@r%URG+CCqp)HcYHCC>njz7eVETR2)Iwj0gjfNh~xonzw$ zij+t9u=-m6>#DrDkO)*49?li1p$Y8B@kW z(*J!b)>tiGV_te?tTK11RXBgFIF%B#-)4j3B9j=ed%lrNi^suMIfnL(6P9+GJmc}R zfgT-?bv&HLjfWmJh6=`uyC98e6U4u@JbH41V#*UIh+b*iLNeO${orF$cg^Euk~49(EGl(rx;_X+eFbK`qRGF>?)tq=(~vk!&oYx2At-+lAFw z6E$_muzp0pO%iQkOl>?_-0EDv5A3Ke&6p~_uxVN%wVWcNv43#K6!dQbJvRl)=R_Ja z1v>|z5j&=c+Z?~3lBB7slCD$5t@^orGv&#H0<%X>`oiut_75hb zYmM*g0DEK|(Sl{%3nluNzxF$q1`Z*Z9Q1(W(S*^=5h>H!j+s0(2+A zTf>+_kIw`v+(#eG6q9)(l4prFj(l+1M`kHbJ9U>=%vSJ!MFjzVU|dbg2tod;Vk4E>;?b4-qvVl!;NW`vAv z&zknB=wl0lPP`?5CX#Wo>!#1dvl)c}n6f*&LbVcoT2;Ji1Q$V|rp==8&&6LjaiYx; zt@Oi1bn_e_>A@m;W)85fuwe8Y=xp)pLwZ`*wcb|PT^v76$7xPl@`Y%mWzz94Ai?aU zsJWuQmO~@wsvU{1=8E@0Z5nP4-6zwEFGUj<-dbRF zskd5`JP$(pOv;`oZqko}1DyqjzyEoW7{-e96t1eiwv|@T6PXb?-egFsH(?*-)I98+ zjHk5uVsPYyXs?q)q5P%=3dhsV`Qnz2o595!gp|a?q>|vR#&F#hZ)}Q<0vPs=w`eG0o=lAdh_|Qe9+=&w`FFg{5-BS0Xcf zz%|@Knf5uY{t6(1W$w@t?9b3l%2{IXrm&1y@(p&QrY#XS2WRjswRe328Qxn8VmO9A 
zSqfCmqy0;9ZebGL{k0gPO{Se+Lm*i+iNrE>KBD_d(S$}V!`8@_NmLT_?al&WgxmXi z+h}cpXibv}L^Vh>D+|QbF1A@QqB0<$%|HKC*5V2=$( z)>UGxtC2T}cOJ&k+*P85z^=A(sy;-^#G=d6M`&<4I-%g=D)FcmS-_GJ7=e*bkFOTf zF|}7#i-+_X(KKbXs6!vD5p6^+W+)&XBn9jQkPcGl^cwM`e)THdvlhG5N__9EeL_F3 z#eUpmYQIic&OO&a`#p63T171&-zU=Hb>Io-_2j2Tl9)Prxq4@va0m_M2>Z~hcz5!U&>>(r!J0kphW+>aI; zm5A2B=$A`yEGd^%wWTGZGq5|}#_qbM;;yXRXug)Ij@H{)Y5?D{va4YGdskeVJPK{qA}aBv6M@-cA=qZ^wch>e=}&&e)PY%NYqdASDgo%$tH#P0O}|0 z7LUXtIh0#g$(XJFyZ^`8IXid?-4KZF^;Kw&??csJ)$=HpzVca6AJyhk)&ZdW>X}UTCB1Y&H3dpkpGpsa z59ZRJ2gD@j-m6%>HCdL*rVR(dKb0&c>D)o3-FXg)cOd+ZJtR80)IkQ;JWn1H585Yg z_^rcXVYwDTrcy^9MjaOE_8X@yJ&cnG%L*cnfL_Cp`A)fb%Qjv$R@x?8DX~9t7rlNI z40<|!dQ?1&pQ}e<@tRJpeo!A@`azUx`PBAD{LCQZn79VfD)ufT75>`MBPt{<}tZ_t)Ydu8= zPGi$;&qoTAvZg;XZb34`9$42xMbv5~!GVl>6UGpW*{U_E`}Lv}t)3i*zP+ zKPwV#gV!v2@2q&n7W@+F<#VDbsmdGvB5K5p?Q4zkb)B)H`!Awfq)&c;^?Qk3&e_A*f6j(lGLCWuK@FF?a()$U>D~&gx0&>01(@tuT3rDm zHk+PIJjhv0ydLVt)R(U4o1a9v;lPFN=;HM=72aqbxh65Wz+xe8tT@>OR4D(&v+d&YuEqrjFb2&eF>ooZ(bl0!0^e_%gKP z`*jKJSamg5m(eIWO_!g5BDZkJOBzUVlQ3CHieCzoeVogA@M$bX<dep?3BE>tfaNa!(|#s@w?&j<_F5K8wasMog23=zpn5 zuxWF@#2(iA8;h*+=i(wumr$fjR>S^BU6)LKW^IUd3oA%h60jMkRBok(UP)A^^%TOw z%m@Tb%uEBZ^vh&i6E>Y;apL_3MK>9@!YzBD`!@GdpklLi?+cggj19u<2$>bdTh&ni z1=N53&^Q*`&wLor-hLhtM~hrCgK{HeDiD2s1RybiwnoUe^l1i-+9f=YY@qE^_ehx$ z0Tc%Ao{Y~VEk-8Lv`E=m$GN-nk&@+pZ~o@JhK&i>pQyxyr=n#?+$LBQEo&s(oy1zaI(*?v9h5u=;?@W_mtO zR!jOj=;6GAK~K|>(_WSlz}q!(vJq6}^Kmi(QlZn93f=KC2dod^uH|q7c+bTH7Dsne zbb`MRlAcMBUj6EB8k8Ux0vOE_0bj5LjeYDtL+8D+CViYJA9oI5q`t{UYH3P7ZPLV) zcR{iN1ZvVuQ@&DH*)|}wC}EdRyTQ!Dw`P1N-e5@xP|{XHrngp`oIWF}l@Dy?ZDxB0 z{QJ1dB&%%3uxHsv1i+|)L*s<3?%odDx;-B-lPeSL5g|>xUS%UilmA?=GNc2W`9zGb zURM47qJ~mdk1Vn4Mc1fZl=RxBRCO??4t*!()7l)W<&k4t>o{Mm@dd5%0OyX=QIBi_ z5@ROGT0osPNwNGyX(4PL3$yCr)!}f2@vHwIJ|7P08RKxdkP9#4wcFL^&qe%d z?7ft!(rhZKEkDyfr8lw^U_Z~2k3xdb>&RE|v3DIgA3xRV%F*~)SXWNQ&pq|z1pFMW zCm+O5^ZN4b=ue|FLRm4hYg|=d5>mP}kbNRm3T~ur-bFha$oKKNJR2g%bJ_A8`+1E2 z)6j;M$!R(A1$-ZrBhy0{8kstcz8F)Tw&uwG+N^?(jbsxC_Mr;~G?j1azU$FdTog{x 
zJan+ROd(ebSt~IyBTOA;?O`oi?q848sD*5<&sk1ATFCnP+U1njLT2jAm(!{ivXegY zJh@xStfWMs1)LM&Tx&RAQ=IL?1@Q3j5;A9%|mdb)+(PwBjQ~t6x&)RVuzzfYuN+zespX3FSthd zp|$LvS~w71@ap@$^k4%S=*fRnl<2-~uoV8}92z|y@5=b6%99<|`f&w+w2|jDee7;J z(N;FrKiy3!w}73^-(Aq@7Fn+8#nULqk~z-3)B3{~Aib4rj-fv-*$Qm<$K~khu%(W2><-CYlvZtGo#|wyXLA%&5hh&qV|E{w>DDaF(=(z_y-_ zw8sg_kLg-_%=Juq`&QZMrd1pQ4)(@Kbu~K7`gz4ihOr~k{urU|5?i@^GAn{_Bu5$* z&iSC#mv5CdVVKLf4Rmu2-Fh2p-$1Y4CPn%b-hbd@U=hl_LL`2%UGWK@_h2VZvu;Dd zjkNhTnUZ=c#&_8{0-jJ9Q&nC*Vq=_~*g@7ul?^(`n_>Cu*+D+3EiKsE0Z6D18%j6b zA#c(r52csx03TUeP;!TSPSf`cp%!<^o3zsfuiPbvYWldL^w-^XzMOji`u*g+SN758 zxGCdanMiZ*l`?9uTX*)=*%M_6ZMzqQdL~78ly~43#Dg7WZ5-s|saE?8kQ*~&n>%=1 zo*q!lBaOMy8BtI*72qqMkCIzsv3gyW2ehT7wl(@7>Kcg>7x zrguY@lgPpj6(WtTi2Rug)!Mp=VB95=vXItb6<8tp(+GZ8;+J5YOwgaF~QKY?fC<}i)aus zM0u4!hB}UZFqJiTefn`Q0M-+Bt5enZFatsENG}Lg7Jg8Q%5!9Kl zb?6ITr7F$SGL7aAc&ms|_X#46fNNFXc&|9QJ6up*=!eWC#^@?L3`Labh`!!%r9*H{>NGn94xBBIP$%Cb zjeb!qgTOkdV9Dm42Kmg?<;QSIJhuaQJ*9Zp-M&Y2y0GXv@=L_kGhJ%h&LOqX)J4>@~uA{jl zpNBuk*9%1P;a)Hp;Hyo|i-RYWaTYMZ?P_FRgv>f0q*Z7eYD`cFxzv#vrV51l3vi4V zWSqb!vru}G!mKdk09w;Wq<4|o!jI(PYQ1APSgttXd3zVAp@ZnTF7kH$ml(RgBsS3< zJY&>7KHft=b&)YphJWuOGotfPfn_wd&YT3(7)jM11YMm&-5->7V|Iqz^_o)2z|^4N zeDWZ~)>X9fK{+!5Nti?adi4>R-4Ir5%q_e0!#1(8$w=nG!{vg~so@`(St2eR+KwfS zRrK9MAoc}x@*&w!ACN#v56iu1^!&q+wl-0CSJ^gZM@UB|4B?=Jp=jlSt|;(P!8={$ zds@}T+n_|NM`aD9KKZEZq=D-!cudw}p<~ZuvP9Dc(&zt%xbiX8ctXCCz9ky)R{$~A zL`P+Y=t%ASxTxQ6GRl&f(^b#Xzh?lM9LyUcl7rsx;p;qz(v zXy3y)8p8OD3!L~3wIX`5%7Hx-FakIX_4^dy=?9g+AK%$C)`GEB{SHR!R?$ItAy8iX z{!`(uC=AkXeZ_^N4CSSsC9sFM+1i(Y%zIJ8uq8{=Qq(t_V4g0Qg z`2FPO!}F>J=dc=t3yvUzC;n-HpLvsh*g0!Q#ieS^9bK!w-e2|gPL#%Swl0o@=Gj7> z0@SP)Y6XNCJ2`4(NJA|&6TV#FYzhCe`s&>*y$2XH5DpUJGnpY)c1IiWdZ-nww+=x^ zkb-Q=pE+6pCkN|gzRmm*^9;Yj@U7)*4(wU08FfR5d+qnCt{{y#ss^xG**!Bu zcwEe1*f@eZTl6q?vA^tmjlSI? 
zX4iT6gY^g9FQ?Q|hpxON>*&ZoqqoCLcf2gyhNBSlNH=Z&$koE7_2~hhpT-{A_Okpu zIH@tsdj%Frl~9YyUy&bGO`Y;;WopT*a;Y}3;NxEMH7)dOGpY%7Zy#BMs=X$|;>)7J zER^HmWF?Nn;N1B&*pYm12l*|lo=s0dC!1zC)Hz#re+aO&V1~)O6ZEwwF4Z!{o&B1; z2pwkX>#{nW{;hi*TK!=<_`38&9SXEnPS;J33iEFt$*;cVye3+&mgIV~yEH{oj>I zX)4#B;r`0}$%_g>6(!C14OYkl@5oO7|7t7b+Js%(mUra~`k6v%`ks8H;rcD$f00bU z0=^^Ay(Ab^jjcxTw|v>dx>5+2dGbzhg5>wXXtz z8uilZ_hDKrqN8Q9AvO2_Q&vpY2O#jp^vDM?HGPt@sx;Hf(ph|jvpD>!kzhV~7Lq+p zqxz-ex{uWlmf{lXwNuumD<4RM>rD2T9wZuj0+Y(~P?p}qI<}EoeTc!Hr$;}O4Sji2 z|2gj!hO&d=5yj}wx55>w>QgKG^tng^|2V}n3}b~a?S=r1+*7YbYu6Mf)X{~MItomALYrbW*E5=VQW zU7^KucyBc80&ZTY9;hmIaTRkk_h&Z5167o+Ku1GyMQaD$+z-X}1m0D#3ta3o%{^Nc zyBH|;^YTEk;aqIcG_Q`Qy|%;S~!LlIz&&^+|yMPCj(6^`xJFJP-YU>FpYij z^V?c}+q?tK^V?7SwtXRxjca!gK7}uVsxhJXGVF2?DpnQ?9Ojnd~T|A1n zFx2oN*RVzn&hYh5)i43kC+z+OYWSMxiOX!^8kQBJRqjItzy10<=8V(CM|c~y7%PU~ z+I%Ft0}H2pBwOHT??*7c6_IpFF7`>6%Zpk=bg|u;z z^;>E1$1)@PziDRgRNC`@C{uHg%;@yrv|}&fD#NJ?fl?YjNIu|XYG>@Gvx8*1lc^b= z;|9z0x=gK%-8NP_8E-~v?$P$F<20d71gCH2JoLz5_^en3JO=;m%g_D4>c`KY4whY+ z&Xmb!jxhLM0OhJjPYuDoNHKjl1O%^;CJq5>+G3NiKZd|Sx0j-Z%39$Um_L;nxLF`Q z*tPFGR5rjSS>K_Ga4j4vlW_4s#?YCm(iMprQWh*@FP%hL?<%`&FwHLun)!W{Jq*%+ zk(c~hRf#gr*(a}Y>5H1906>b zL=TMs`~HUB9U<#RA5y1(fuOnR7LE{%kZI|@PYP2(2W{RA77HE{4v3r?Av*vRg9K6X)s#4^-_admoze0J{Mz)Az;ARLBdeAF|s|z*nOX@8O956 z3+4YVvuVm0>Apdxz+`cs`+Gd*9cTt4XPLxdRKbIjrK1S2j_{u`*4c=%Th8rAHs3(Xb6%x(_&6#-UZFA*0nk-qN!_6mV-e0zobI2yh`o9tI7-vJxs%&m*Q!jWlf%tnFK9%Ou$;RJ0I+- z%xsm+qJ2|kHCjDYMvyj5-cz?6tI0U3uzyzsFSxhe?ZK0+4*S_ zKl{F~o+eZ8=V!>2Kd73Bq)y3=K< zOO@caPSde#Qb@g~%a`>Z4LUL%TiojP4w{?~o0k3U7SdE5-)3?BR0$ zqG@Y(sd_uC4RCE73-NiBwslCRiY(Anm`q0x%#e>Wq9ASnyaU0~hloR_TD$kmlsElD z3J_)dOnF#el}CMM$;UB!rL*J{3BZ&@>vG8Lis*0+Q$C^gv*r8M$78yn`?I`mHxY3| z#)Elubhd1g`dcA%Fl9vYWkhhqKxcA3wXrr(hn<4XJo~9E(+`#u-13>+r0G*vk!ubN zir0z?TFwD)=3{R&zmQMqXG_UB7bc}5dSEWj4iwS5bD=Yr(}cOQm@_^1B{p`7Xu+5A zK7GIry8fjcpcNGin1|yAIuyQw`SRQPMQX~Yq(P8Zp=7{j0o+KV-0+W$QP2mz`~|X6 z5cvEl=<)Fd@|MJn2DUl6; zII_OYZ2^8e7t1F4Z-wOg3VViF6r-bqPU996j;p2Un<)N(KtpO8-+lP>93_Sr*CpkE9@RDT=n}JFjV 
z{YtC~)ZxEC7332~8(Zh0P|ypi(BIL$K3=PuL|?fzmT5u<;5V|6118w-_{LgRDc%mq{at zZmPlBtn9I6GAZTn{b6Vz!>o-odYRIX7cRphJ+hJZF2fPW;8Dlng9b zC43Zm;|frZVoJLtJrh^Th+5OR^6)B^^KlK$XQfs`J*>D5SAxlX!FfY%>qz&NvO#42 zLKa$-{+zc`rncIl!m3%t^YB^snK(GeUx(=D+%G5`OY{@K#kR^m=n8Ax5M6#j`PeNg zREl+d5p<;pd`$XAw)_W4uL-T$pjgbTZ?F!w5VIGT;s@3RqU+yO;a@yM;1t&Ho2cok ze@wB>HIn+Rl8<3RV28@4$Zut$UTlzd8Io|lcVt9s;JbBn6DUwZ6<~4oEO9Fv%HPCD zF66I%)VK11;KA_Hf!LZu)mF=RdrDHM<7(`=_=MMCtN$jvUPOIcHqqSGe;Wv=K>0he zTAE=h#uS|&CQU~eOo4H0q&Kmu<&1Ot`}O!52oHrcd5!E=6_&&DD0?lir3lwzq^YC! z=hxa~!)F6i>yTmVtEynrT5OEypRO&aScfB2`n*8}kA2S%zYHQgyiP4Q$PU3E@Ja50 z`7GWblYFE1mVpW=?EV{2AsMio(tM+=)_gdJI5exw2nW4m;Q@Odm9{#T$61P3s4#0P zjokP*2y(T4nC#mqU$r}GuN-9=dq*~=A)BN-B_NI|lPkk=`4n`+fo@RFO6f_P$7(^< zUNJ34PV|a#%A>E}#5O`%U={*o+NFYf$+n0{s;onI$k zqruyx6W2Eu-k8 zQk*l}OOr}*EX$Tqo#Sn}zM>SefGv%(MAU z%Edf|seXD9%dwxpq%q^e0AM~;`f6f~k2<3WuvS?sI-nQb_u{JA)}1oW>+83F4jE{` zRVAWfkw8ekvQwr+RU)NEiy!hNdDQ9z%K0Mf=tX%CH}Hb zY>z#FJ5{7})o>t*%PmJa+@q{SkhKWSfCa*G4RWg#I~x|tr^=2_p}TRC=JHMm;2FE+ zFMy-52XOw5>g~Z+Z86=y2dYXDy}w5p?-uWox8vj5ql)HR2SK$Il$0)3{DYd)mwr&{ z!rHyq4J@MbdqK&2{s3*PUYYEFlTwL*&6$w^NtmrEVwRvd!m@0vaVnJZ%^r`&R%u{g zVhf_OE_K+atV^&mFxyd<@q72mNLsT`Mxry<4%>>MRo#;qhAGC>MDLe3Ic;+jKYOU# zek{8p>bD=(`F2Mz^5y%n@Cs>BIdt2=$X`A1e;9dEz^=7!ud-xSj=$p}+26Mo{e*)K z9+GtskS2rqvOQd0L&}`y!h;&{JBR%Pt{DbYwz0D=`3LP(AOCUS))8e>d-aICOD>6k zYcik$qX{ovn-Z2&@eu`!b4OtE3I<8{@_zt{=ji_dB&<9B4H0h0VYYW*WDSwF9! 
zZa+eZ#_vG<;`8S}%6|vs-9-W}L4;3H;;V+?FeG@7!GcgsHy=|SdhwWSn^dXc^Xe>5 zv85c~7_B`9tHWmc`IzifdmWB~^|o|YftX+;8&0+lvg23{23)7-i#gmlMvYJ7{M~wb{WO%f4K(RA?u=}p?@wbPY@y#ySAhkS zXNCp1p!KuNNDYudL^x4|Tg?gQ05|*aXIN>s(Da{Wjy@xgN>QLgWdWW?&bW}pSr!2N zR61N?dj)1Rtcn2RFEDirH9v!Ss2rg`C9jZrAO)MZ!_ELvH_(nVAbcAs>8v_g*!`>` zeEDZ(R-4LxFxv5rZ(d?()TlIjV5i*vhQB3BP%Q_Wf$@9GSy_X!e!&skLYn)F%&=`a z*vnG@MLf%2lu0$?icF)H=U{(@3e>fsrw;Wx_cs+N7rUPJMy(meK1cu*o;_N64t()A zoj!--m_;NjWNT+GoH%2;x>mqyvUMwK^mL>`%CHjj`cKNMsIp}YqlLwF_lF&#q6)P9 zDZr(e^HpRyf6Dwqy7j!Q=WnpjdEBzdT|pDi%Njaxebsq|>j%#(QtAE`Mth{z`Biq& zzqm?$f5ma)1y^bLuQ+T{M2C@_s&=3e%NYB%L(mU{w4?c)V7^CUDW#m z%y-|9rSTV}*RgNl$5e1ZR#OlAF5tL}lKdOC^6_z-7x9rC7?ynk0z@hZ2vs;Nrl!); zI{zD=r4@GWU_%~HHjCte*-(@PMFHHHKV+TzSNg1hobjScWM1#v5%FPfkT_78CD=r+ zhz>X7S5&9{fx}tF^yZ&9I$ON#BJ7&ZT^pc2*Rf^UyuZ|-m1^a>1UsP8K(=N;8T8Jg)k0GptHTKpKkO?p@^@3@?>|X-R@9()HZwpfWdHNYQK}BX=se*z2 z6ac5}3Yf|kx_m{pXmL5*ax-PA<5({5SE>U`hhT}A1%yK3gSc44_{5X|%YhAcx#`B| z)blEq&m79*pS`riAb=0(CHt*Cg()1Mp`3lFfmh+m?f_ipgSztl z1k-byRy5&J&!^g!iJIpQT(P-R^W2(aN5W&wVu0GELg6JJM--L$u@LOEOdx?tRWw-X zH<~9E{VdZwIiTGUx~DPdT^rp~4gSy`*F8z;=yarQ733Wc7^xU6%AmsNNdMM7)oGdT z$;XO%*x_jjSd4LanCs7XcxqFw!xJGHnH{(t;n&l7DAC4By5R8eLTnbQiuDThv{w0P zX{bl?H2d1+RzXpDnvaB{W2@*oer)!f8Rn@O&Sb$lHj6ric{)B-2>}p5l;p;OA&?6Q zL5Y5NJJ$r7mx7e6#`Eqc4dc9pxzbnVHNJ+|7 zM&wd+_=d|m(1|)kJ2?mx$_|e9Je#m6z)(1b7~(-;W8!+W=V@O;Kd9L@Ip%~nVmw)>e_D*E z5i=Xm!8@f73;e=9(jc}X{p_0-i_fsR!Z^I@zf>6UMq<7z>23Xl^Q3VcKp zW8>qVK5-2P<(L4Mu-T*pb*|ewgrW>jGF8NSoI!+;57aSKfK`!(hwVl64Nnqp5;wZt zlSq#lo)&F)ZGcHH*v#no&+UR#RB89{1}Zhw5Y8K(-~(mM>3t11jq_v#*=0C|t#Uq~ z;Iu%mIM1^|8GL8af-+RZc}zBqufEfh67C0Kop_Jhtcv#p?_V-~=XRG(R=JaXxdZkl zfBFSqZ4kKBgd9ONX5}`hVxnA+J(CIKP4Kv}&DkKq(;FK%(-S<|fMrpFhxJb!U8(2h zsr8Eh)HV^ca|=C|=wY>ZW+E`7h_>POdC z)~6=YlX=57F>FNsp2Lzp5Nf45OVcV02{>iO=V|_{gD#jUY~iOgWlKu1WWqDfG7JAg z*T3PUS_L40SwqSH?pO_Gl3RmgOc|Q<-!5>@TP4vCDjwWht5NSU}FJs zN>e@W+0_FZF-xo?Kbq!AMyFvtYe=)xJbYqtd739+VsWQ?47R%6w^N!<7LRb~v}L;I zkv4-_dJI;&P@SKYm7$xJqzoNkk`g$PF>>?&$JlqkM^SD6@62wJO*Y9+`rJv|-2@0F 
z5PC0z6blF_Dt7D|Ea3CK=TC(M0vHt}K;Q%r2u0}-EG!@;AVNS&Xo`v&1u^0if(5al z|L?i8n{3eM`+xY5*_qqVJ@>SG?zwa@<4;{=Tnbu%jZD4I&^a?;j&%h*B2LHhMn=$YtqMLD^jrDm>RYMeZYM;@{Og+tcWG>#|XF&(~?bG1f zcbR%GK%uQi@6+@O!9U<)5wKaY&cRo%{7U4}+aybA4THPFk@KVn)qOyzUNAubZ=4ql z?L)fD3mz8`9>?N0-Rae%X|Y!yji#}Y*TxmVF05s_ZiNpV z=vO-A1HQpm$fvhJn~7O^Q?(Wso@VLkXmiKSdJ;X6rAs#-*v7n6mZjf=_OScWCMsb4 zWZEI!1n$h%qtb&uEF~GfCH>i0Ny>GbXvqfW?=snmOxWn>`fY6jT@v?@MNw))=R#;Bv$NOgI z=pkMn_1g-|m|MqE@8$sD^*MStM7o=D^kz6LXnAy4%lP+$G$lvZ?Tf04>97utyZx^> zjsTR8bHKu?+FDZAeZHu~h_9f~#eGbA6Jvs;ec}<(=Qe7J$S- z8q`AXgalOXW!a-fv7Ag4qS;^ z(BhVQch9k7Wg2_DLxn31{0y#o%mX$nFSi6exkP#^y}A0*GPC zYAb!Py7E0b*$VR7PjtDJ-l}B-Y&p~K2SEWML*ymNv^k|7+7IXI(JW*)Y&Oa2fs^DB zkd~I@>d66wOi6ZK;V#ZK0s5(}b%A5PVDDG>fi|RH34Tri_^)lPw~TA>;0G@+Nr!o5 za%=rT_1!P&Tx;E?1+k;)PYmq9k_1G1bzTV(lbJV+spo zawms{kJz^QaCO}ty1y-Mdw*jO&2Ot`hZ?(hNpVBRSkjkzQV8%_gSP5NE{I;hsR?ws zt=>FH23ob#rLE_hc4%e%Zd%w5txVo+?YN(BCmHj7?R7H)RM>JSfdKA`HY804DQ0gu z2M~N`dwnDTkkvu&!{i{~dBW%gnI@3u86>Jd!P?XTWT^U8?4JEZj*fau=xBKz^=#|3 z!cTNtM?EhAI22L`Go!u!`5pD2RdxGZoM#7Vm4yVI?4lb|?Sy@l7Ix8-DWNMgjlDFm ztG-QL9Zo~LgAitP(>q`*I3JAWmTtOF{UU}uUGaTJH~q1|cSm=8Kl=*hbcYi21r6!0 zd(|`3=*jMSKXvmu+Spwm&*$2@_t0-tEc2kXCZj;Cv6a_?cK6iTmh*8>i02>BuRZmg z5E+}rV4e(y2-DdQsaG$ln`igZv*Q6xMv3}O*%x+GNiV&!pM6g8szHBxJM39>p;ipYQcq#pNQGQf6S+)vLO(O|m{7F|>CqGc^e z3-%UNa@2|Gfy?gtV@!Yi@$6uB1wx~|TqN;F;29sbJ!bdUTe)Eo8MUS_e9nqAxfRmA zzuq(=!qonyVGrby{t$EPi2Cd8+BJ|~Rz#V)Fv39<4w3zXD(dw8V5m6KG(b;DD{r*F z5X>=){q&1`pq|t*$nThiN)jbN4&AdkHq^+*e zA48px*xJ*9YjpPS{Nx%v)8hG7#~C4|1lQqO5Cz2azqS!I;O;`0kduQ+0yUU&tsc5< zZ9}+kY>@7|R`$Nrbrw|(A?$UsG`!qt>UCIGb7>_W@w^kpL9JMgtwXq8c;TM74%mB$ zenGNz+zG3*7M9F9wq#9eP{x0vj)V1{ttxn$LroeExz~;5hy1WphH!ER#SMmB_WIyPMhc!%aC`|7UbOGi zxxsoy$_c33xwd1emNTZcHn3?zS=Z~Wcn5CS^%B*_T(4)PuVW1;Hbgvu{=*ZE?&9Rc z`s-nasoYEZku_~=5weDbWCcB|&zUw<7xs6HimBNUJ+t#7t~Hc6;$ned)_OSyP-!G9 zuh~muhv?1Ir%983NIQS-(!#y;=@8wQw*G&1X%o8CWvHY@JSk9VpBk!nu`@W<4b@-d zAO0T0z@5~R@ihGgeTBMo5B0cF0_ct#bv}nS&UbK~kXQs654I6Vz6Ryotarzvy8mWr=HY{Q;m89LN6@HPvxAm3kScG6 
zHdl7D9t%Z2>lS^Ix^o|`yhTsz`X^_ojZ89l%jEz&^@UmlnGYh=g=DNiqCai{5*^-0 z#t8lHfZ{H3mM=Jeegu{S_P9srx8R}Q3L#`44Z2l-4zSvLtKNp~X}{bGX$!kiDYr?w z+4(j-kA?Aoq$6bnNOG5}{LkJ7i&K{>_H)I#(->wND!4-qw06_SK{cfRI{M141d{3b z_?W(wF|>XD-*4Bm)TGt8A6s&Lg?0MqF%=uQEd$GjU5#1`w_Fk}_(p_02H zb4=Ys@7}Es>0Zk6q&zquAUn8A0S!Q_FxkNz7FhgI0^Tb@Z)g_OVB{&Wyq@4*Z&1NV zJr0nVKC+SZn8(VQ;kWW~Pt_i0#ug81ovHSGd{Bq|T@!425E$-LXHN4J#KwpEP zVrw7JGmv)R0lga0s1`g3MQt4&e^9?mIZw(2y%Xg>q`M*~TL9JOE*tldt{{5tJ%yl| z&r|usy4POpTu5z3>ponq^VP#J1(eg|QAk}ztsl{ELpX&O9>J{tKr0^6e?z5lll0#1 z`uY;J3yGRlo#S~Y*yQxbbdRIhseMPSN9$RrQh7#i;w#75bexUmL*yQLG+p~%o>p&e z$CXRX?K(Edc04B=TmMfq_B|c`=U+CqC$zCW^^GAo-`_TN`0pA!%#E$3Yo5@r|BK=M z7~0~G^(`VC;*VwuL*4WpfjeT9;*xCQl*f)RI*f-qRS~@!xJBH_J$1RENsQZY- z7;H^FJq~O3d0H?IEVh^`$3gvnfxaB4kHB>ID$qX*AeCdX6Jiywd;9awm&yFMo{5$J zid^}LF90za4dGXhkP7V;oAzNM4SPiIocL#y{Wy{C8LPiQ731}xs2fD4X9InqffKNi zYY_yrGd0-GydZ@cTJ)FXVE2_{ry@xX@}37d*iA#9$Ji?;$_B3_K<7iDeV(D6;xsQY z_tT1L@g)5lfaAZkTK9LY)^V%9gh20?AoQlmQ32S8i}mckfZebE6?T^`*!e$sL7$|c zg8N?52Vv8^;w6l-n5tjWhvR0SCR1Qrc$mn-oip?_s-2O`hOmBTNYCvT#ViXps-SZI8Fo0^bh&I65kxXBDv%XDf2&O2~XCAT1cy%12O59FDQ1d z-cEh%3+g);k_S9%=j#9LP(O5cVCZt{IZRp*EOItEmKINr1?#!st@Cu@kih+D z9t=FoDE$>ZKQ4F%NKQD`;CUPx)NhLUIR+FaDo3qxX!|2+^dg{4@_a~GFHz2Xc#OcA zVZNT$6o-q#5pRmo#lnN7Q7**Pi!zIWo(h7VJQ}_OH`7WEu}#VvD0bh0zI*9HDj-%PhbYa%?MoGGaxvJi9ClJQDwAtIGB%4 zEYQ=MSKCmcKJf@AqEK{_D~Jk#Vn--_q3+R+#s%;AwW$j7(BOr-?%+!3k%fAbsHHM` zVhrPK*+M|1y!rA6+{}Uo6xI^a|#eaRlX3u0%`-7gpc#I3Js6>>LRO zrSVx?5gRQB63nuk9xT`I@SJrr1_yH>+*+_jYZA857v29(;Mw)> zLFwlD%jI%t0Og0kh~|t;3`#4p9+a93pDiE+SQ4yror6@Kz6_s4;B>H4Aw6SA!aWQt~mr~Cq zy2!o)mRQUTOKfq-g^eJjk=k;4W{Cw$rn`Y05|)f_)^do3x#Hpy3Cpyl`a>{(OxHIPUK8;78)Cz0|48V*kV zI%>O2&&j%if|Z1Uf*}iNB{oJG$iTbjmSHYJc|(A7N#ZUE1mmL;TtPdQ=`Awl;t8-4 znceyTR$|o|wOnr%_kpC(H4e0CHRT_;Tz4o4xjASBG|k_m{9{*u$0LNo!Iju$nnt}= z>F=ux-lF8ydPjBHTQqpJelJ)?#cI77}VftZ7*H|swdQ#dTTjFSa_CmZmSg9dsKfbs|W{%uG;GbnQl zB%bY5v_*eBV>aTS2l$y4XA#^NoCJ}*uy;L++*=_-%=TZsRiCA(Z+wNba{y}1SN^2! 
zP=wXXD`{}0{#nM1*ATS?L2(nU2HDb`ZO$YNby||^T3VMAl|*;%fB|HN-@ijokb#W% z@6!LR;!=oydr;F1dUg-CS7y=TJ>WdE>G&RfjJj(RU0tQ;#qMHP9Vf0tfv%6xdy}i6 ze3#OfcqEj@!YdHqk-UpBsNxjeyccc}>!@(A{;aZ)MmqP0s8qf*`);>yE3(SRc!v7Nyy5gr#|5>>7fS1tk?4BAKwhAgUvd0d^;JUn7L z+jFekU63gUzT0?U^@QX_Zkl>XH=1$(tgbP24mT6?y}(ECAFmTw%7MMB5YaD+F3gKd z#Qkr5czg&gB1h|sxDL%a=$U^;rDaA8=6S?~55c$aHGLDu`C|HdikF$Tfq+U3wX4=g z_JtJ79_0)pS%aDQXdp5mLTw_#tj0^q6VA6>M3{+>TLI~9Cc>0zj1@?tW7YbgRNVH^ zr!xX~+Sn8Wb^~-n(6z#ERTji7@31(Ei6o!Yfu=!3)hSL*bY z{>meu+iAdC7{Fyku=>EV;p0+(uZmzxHAnSbf1jiBm_B##P(>|3@MX^^pB+eJlQDsL zq&|gF2Gx5M1hi$lEOzN|wnN?%#`A0E`DEK6+OuIk@ykQ69LJW<9NKpripm`N&vE@{ zJkGqYSLz=mU=S)mCl})=O};A{;(>plPf+G8Gf(J>IwzKHsY~n!u3!A2{s_NDT}bSs zEb@2w2n!HZt@~8>(X@~CyOlZq^B?QaD6p30f1=+TyC~KNd>I9>g^j|UiywUgEo&*o zf2zM?m1+rI|M91=@vWoq&mihwpsPQFJ^BJ|{tSLJwUkz)->SS!|Ekfi@-inwMF@$i z&4>l^INK=LnvSvYPsB4c579?8`Xsj`E?q3z;=tTI{yFxtXZR0&u2-tOPz}~1-bYA48QTSH2N341mjbFMcK2||5q^W+4R=0fYn?& z_bZ6`JTiXMTPK~xn!p9To;VL~Jpqo40@9wPJAVV&IYZ-r(-E?p{J-fpqlMVZfQNfd z90F`djzW-l7cRg$L*stePvT05+yB5uz#01IANoFJ_N+bv?acZ=(Oj7~Yl#@45nEdk z^V98dje;O_#_v+Zh6q4>r&A2XBPvpiSJwHbM2b8GuUn(U$9SFN5^eZ(Z?rg$$HEvf z8IPv1g6C>jtf+%;RA0Balg*`W@hI*-h|$D)ylvOSoB|mE*yV{~yv??`@~x=r8iIFb z+th-90bF6>lqF5YX5%Hts|l1@`y+$q>ZQN-&jwAO(nz zrY1QQdjUNfCpx2pOXEZ@M`@%thOUnnohdzDG(|`I#fxcRMaSaB=7c7^DJ|)$#T6Xd zMOu^~I;5?Sl4=i@9>VBX&k9E4$9vT*x{x4x01T}Y#ZWv762%nUe-ND{IyRjGtrnz1 zjye?!*BX+UV4j%;$;WHHgNHWT|4@=hRU&rDP0&Ibb16=z>hg3atwRJ( zyo=wXQ(QQhVK;YMZ^4RnQ=(laZ$cSS{?m(vMlGc($;+@)8KB-1KhVl)&d`M9^l2f1LSL% z%S^-Tk)LHjxYY9~7FY#w(F13F(Rx#@YECVZ_gO4^>l#EYZ%F4HE)lf_osL z;F0EyuHF4IPWYp~sQ$Rr0}2?&3C9iG zk)DK3xF{^r!*>XGQb&8DuoSIT7=^&#EIz+OJ+nnSm;y& zM0ARd!zJA)g*r4p-P}avD(}$5CZb*HF4^WQ3OA|*8-=hI>4~GmO+;EsoK*i&HVSe# zYyi8Z1`tP%rlLtqENBT|TEL=9G<9k!I^rDAqfJHY1l-r4vfN>X(pPazWDl)r3M4_q z*QVk*2&MVWL`#Keb~8}JlXP{CXp+UEa4=%L&o1S58DQUOZc}9U&5cx&BhrR$2V701 zn{AgJU1m^#(Id({1B?YtkBs{cGaP#pUUvEv%Z|YS2n1T^dk@03_cqMQIpW4q z0PYm*1;gXyk3SAAgsd|WZ^7vO_$edN$KY8Ao1t~5LMbB618x8!4rf}%Ncqd;ZY_o> 
ztLUEAU=4HWLTk|$Ssw2ry5ezX8{l9Cm9!B()aqC1U>nS{O4sEHX+VBBPke#Y>)N6h ztEfcHz$uc&@jle@$L_cs6hHq=JlB$E&q$`;&&C_-8}anVxcEbG;k~ZCXbX@p!=xGwfga z+FEMeQS`z1p6G}scF~fKSQ}gDa7XZ$^K`DG=*L6p&`GrR!XyqfGjaKW3}R>rk=Txd zXbT}ygN?kXlgL$%&GNtA38W$JO?hQ!kh8^VZ~6y!6-5et|8Ff${XLP&`-&R?iD^B= zC;;K^t00aPQQ1`ha}kZ{DY~i`<4Ef#x^o8x_Y=9m^Yujf-4hw^aF{!6lsrIm_JydT5EVi)+i-LW?`4~4$DG7unlwN( zQ(>T3I{@UQh&~!1vS9Q6oxdVWvw>nUUiS?Y&2J8=5n5NH8kwerD#O5+(0A!gCFwA)sP*X+qpb81^45(oioIP#80%eXM1h^}_$jfE z@^~QfA_VO9EJ6UPFbZUTq@@)|KUB2Z#T%`Ht6|(Q);D;MMzRfqZw-PS z0pxLSIOLI7W`CYIj>vCa4`j7%-Lk|aRC`tS`i{^}K!1WH(@`BU&Z{xU!BjnpS_;r& z501Vs8ybm+%PjicKE|Uxv&0` zsieCXX-SzImCp7m&Hz;jeaDPq`!IOdPPIbqZV}B}RN|^0zF0-dhZtGlLKWQSqcn$o z3^T}qg!`G$)nd?g6f6S(xc(B@ou|m@ zdPQxZMg`WLL!R(%w&h$J<;oy6Eal&zndNx$txCW)$cm`pWhP1(TZI~+N*2kGXlZ9Q z>XKZ?%+b~+1%E5rs`}r!IxNWr!W|(Aw{4XFyIaL^X@%Zz<2yh*z42H^>}DglW6ef_;m2(QJdr2B*_gS;F2_;;vx`;`q(~Qw>dt! zwRKpPt%@uiXh&kT6$}xOX-@zkOsY^jms!R)6D7q<7NeSE)dZ3YV}5IqM0Ah14c82Q zcaL}wYSpdxidODAvM%if3fatBqSAZC09-2c@x3AkXKWq!iT-f$xb{BLPF);M=UoXp z&At!vY%NvaClVhz!xu3*aa~G)R9FUi#<{&Nk2BZy6cZoV)kb^60G9}DItaR?Aozwc zn2x7lvI58hHX5fVT>g5R%T8jN;N;TJ(Vh2;)_~QN`$fBUL0CEEJ!Gh!DVasJ(31*@ zz@W@dNkwEb)!h%VdNN)Afao4>!D;qlEhz!L#S)Nv`GH!#BL4$m_h;y<2OufclIuad z9i#3KLg4+HMm#85wQ6*OAI20QD^838I-r~_+%y%q$gko-k%8ToLl44^c8q>Td5DQk z9}?XmQ{VfL)GFpbB$govl&WOpQ0vEqD|`~dOlm~E9~T++pIyu7^VFm) zDtcVp7EY6Rv&MhwaWPVX2GDObbkfW8%xF=iyy?Gc3`}_H%3o>slaRBv{7RRf6m=;{ z4G6s7W_~yx6<}khjCPn2Srj{7MAO1T@%o>v2P4OeH^MhU5A~`v-#7(rTX%w~Fy(EZCT@s05bcS8{|mG~ETr5L zaob;KOr+J(Qs8-WUazCUgX&6p*6#BPQR0bsyV=W~HDoSY@vBwTK|-!X>?ZW#7j zGiF1-eTjz57M)p+E}kv^?I|upWqdi7Ck8(Lqp@HuP*^g?@3E9gP#lA+%3|7xxQ+ym~yi$T|@sAs%ph z=7$by;$kV<#n!NBK6Qkzt-J~R$nm-vPe5D{OUyoiKjneO$QSM-|Aem;C;K$UF`KD( zv*~z1aQ6T@S~pvPf@f?un-t)~%|gc*FKk}2(n1x*PzgU1`^y75aNu{0F(d!wm8x_s zlRWu}UA=6kkd=Gly4hfk3APc7J*aLrwqqg6HGXk>@Mpp=*G~J;*U``Lz^^zq zG26`KMqNc0xg_eA#ateDCv+03d?&RFfWz`F&$UZ>jIoNK^#?_T3xzf#+T&`YJnXZs zZNm*)9#+Z_=mPVHpdpa^17~PlQcgtFjgT)>WTw-${2ZIDlg)+(UARK4vwbxG%CX(k 
z$@T(HWqDk=wvj%yo2?jqke!zRc1dZ{B?Vmt_t|N)Wmh0aGQ9_KqAzz@8?H$f=r9`W ztMS-auflwmK;sJx*xDe2kO)6^^eS5WCK6&3%taD+%K3sq7w*2YQgFJe;2&c=5fnRL zv@3w=(T7W={sox@b#}n^27YtgBX;Y70>h61kgFmFNdGmPnQC9EKc=#a==lGR3QIB)0jB>^^k43G1Mh33UPK@Z0Pc?oWr zH~BWXz|yS#C3m*-L?-dsEL%ztSUear=Qo!Pqc%YRb}bO^g*O`j3e=lsErbbe2JKmh zy^-1e;pJkmqV%T4i(nm|LGLaSFTprIY_Xv5a}$}d(YeJUC9N#Ug}p--_pnvOyX9e6 z1=jY@e)2AXO<^((ULsOsUd66~iCt`#`OeWlmxy;0zlcT$u@B1fl@zj^S_NIdRNShJ zr{#F0OmOqP&w$q&-ct5Vo`}455K8@3^zA%V|B%{(N?sKX#G{(^swlu7VA5+aGpwLB zufffXp1SULI>XNoe<%I9q{|sU&zP*`x3Sr}+hWZy z+|-Du}5RO2J6#2cL2LSc3jCV(_W~?CX5{p zU)Y0E%@_B`H*D89@wR8Te8YB46y8o-Z`iKkwCa5875g;~y#DmLY!KTu-26rB4cj%G z_1p!ShV2^8Isbrs!*&hlta|x*ZXVk;+|f1rxIvy_HsIg^>EGen!{Sa7z12S5$jE?+ zi3M(4&iEEiE%6?G4%1sZs4PxKUTmNK*aPd|gC>{J;Gq=AzabDt3P#b!#SU}+sD}LP z0s=aQ2lm>TGRqCbD|cuhV)hSP+|(CQsVRX zTq&m7A=AFM2A0`xBjt^$WULjj=|e1|SS?${cpzc9wn3n>Fh;>Oh3;4@nyTtkG=8lR zk%NP`G}D5$;`_8oPG-3(L^Lc#A2eB5hAg9rw!yzp`N>mfI-Jx#Phbo9Tt z9;Q5X*C|SSgQclc{IwIk@P=?h`kwQKcr<+qqANnOW9!Nk2=8r_r{x|BUl5CXw|#Gl zW(nKi`+^9DN=SnLjyItWz)h?0EwMtaI87}#2qQ8$n<|ZeOC+ebsTVR`ioxa}!QSNnmbSvoFPI_r8)X3NA$W|y`8!2y_m^Ek<*9C#D z7|rum0n94eCa6!20p19(~eS*&f2@bW5=`5DO|z%xa*ifu;Bc1+0I)X{p} zx?K#6?mb3Q@?4t@jAQk7?C`9hecMIqwOjw9;jO`jLChOBysc5g5KsWDK(pJZO(o>( z?KHAdBxh_#xI#|k?!#cxxRy#PtpuZ~q{>RMTr9Tyjxn~tl=VT4eMZ3(fBX)pFhr?4 zMYGVUW-DMg4#*GR2}AfU`e3K%q8_WJxOaeGA5>HScd$9~E|t9l4RSXfeFw0vB4d~6 zmlQH#CwRJG3evXGKXyTz-%GFT5`)nDuXl<2(6hnsiba^I^Y4noNxv51x)rz9&eD9C zio7&&G9BM7vef+_(J#BPqr4P?{~mEw!I6JCqG9h<#wyRT&9`Zg#qu@9J{V9Tiy_Q4 zkP5DcP6*ztvp8W8^Wipcu+>~!wPMEcex(cehs$EY7nww zg`Kg?aV8kJq-q1AQb9d{q}#&0hz^yW92q7Bzi=>cM-@!{QPiahJJ2q=yGj_&tsI~z zOnVRv*pZ!j9>-caehTCP%$wYgL}^&}&b~{PRU(-x_lZOrwZajZGJOvpbJrdW*id8Q znY4OT+P)7YH*>FWsi&~3vchqvI?GKlD;;gs*>38y5~eV6)6***ebh2HZC>dZ2oIjZ zeE=VVm@M3f9!%>#_w;i)T6`IrLx@qewM>;n) zexFF7^Q#<@Njv!ju^QSp7*{BEwIfAE{${HknH;J0>eY_H5rs_w*21Qw?-w3@p1WZ* z00CAarxrJ*p?{frDI2MmvZ;8#@ETSm+J-jc8ajC08gvj1=C5%KS6^{cVNOd-Dz5kQGRGyG4iOTvZ-I;ZyYaA<@JIzX-1r-U>K)L2=da!`VT( 
z)#xmN_lZo!Nu#TUk=;-kd|_;`Lh!UA_(}u_wZvQQ)7om$l4rQ`BjKaV)r~;m4#JsI z56eDuJ1qNf`(eS6XbKO*KJ_bA9L6}xU!lWDbf8K%jep&d0Vk#M*BzFOsj~@|FM`ESc;=l(S8O|ROttB`;M`5MfN7o(|8QdMuR!0iOz6q{?$Tj*uqvC^XyDH@7x-{8m% zS%s_%SgUZh-&%y3ek?A)rjOr|BH~cxCE0@?!oBg_m?!=QG!#dp{f^{bfgq*zBqWym z@01KcdmV(L7~Xe%mFKW`Wg$heD+DRr^se8L)#((xvLGg-A@(ZO#`BCpGr8UcqHdi*H6Ms3&L1X$8al9o$~QWmi!OtM zE4HFh!2&nsY;yR-m5aP#0br&1c#Rita*Tkh;NTC10E+**$zep)lZg*8fyW}K_hyGL z5>u3;F2!Aml)u?wD3Mh6p~&dW2$Z8PkMVL}%VH!V1?dDk%2~4nWws`UL4~te{B+Sr zn;m_-NP2;7Hc+fsl&rr=0ehV#vDUhek*meJNx{knazoocYP72BSJdo}C2bkB!vVLs zw;hqWK>`z`lJGAFHNid|15l0EH0S8ro_do!{!X0b20N zPed#Ic_)teAf!Pz+eF4_@P$G=J&APg6Or7z*tu2vHjD)vxyKx=ABS3j{HPJt^U{K; ziBGn^$OeZ%N<5rPU!sqZZH4JJQ_^)ROmYM7(___VA}8^AXBV}WQ)*;N zEmCIFS4c@Jc6PO2;*>g>atSHm-rFi2EwJY?q((H$EDOwxcr=pxXvo_bK#A6^%I0PeD6bQ`?+W# z_U>*xGLF{38S2R1xa1B>fo&XuwVcb&e zgwgB&qpwL@gDsSxh4Mx%uv^eywlIH}BeSW*?i|KwUOCul<8{Q@3oYOjO5l)rTBL)7> zgfVL(o$ck7V%tuO3|Bxf3j#HP7QE|7iTR7hFQM^x+4uwT+&FOL22b2jJkg1G*@^gI zC%8}ExQ14L4r!GqwH!ctC_ww` zMfA70d@fpZMsx__qd~-mbUJ^J#YnfHKR3$ZJTD)%}P&ESE`f&r40bm*4%rabU2uUf(;JDI&AY3kQl$=SUz80;cqRav|?PD-Bc%P$rxWyN6D#?+n z%$|K#2usRxSyEQdvyG%I76L3O>-Mvfn~ggwyiKoUWZ=eNV%U8uK)1lb5MZ6bJ^DRI zOAO}1S&=LyuBdNB+Fwf2Q4%5{oo9$#|BaN-p7;iGpjnOhQcm0fB3ea)W#_2X$zE*q z?04jZh|`wv%cob||FN*9X1^n=pn>S)OokU{47@BUtKcf|iv?srjSx7B6>R_t)@1lV zd1Cq1hwb@_6pRP~zv=;v3@#}~1aM*_XV{2a2wG_k4Ot=|N`adeNUbzS-Ie0Ct7r_( zK}W@9VmR-L3m&mVVTQ|ZYqK#4;O&SE&)^c9nv&{KY$J5ybLz6@v9Kt zT|YwBZ=>wj;oKU{P^p3&JM%}F3U~l)W(;e}BJGJloz;9o&6-m%GSme|hQAE%yS^2| zKw(~!0S3Sg*64_OV*>|x?OTy|OD%t@ljUmhH(W#TFW@l$1mkB(1E*s`qsD>ew^4rM z4k9CW@sbWAC%6^}6Oq`j=MGWZb0Vc6CLg91FUR|d&2OWyM_0hB20qTL{7V3p$`EYP zoE4i=sHX%UV%d@^fmDpDJu%&t+5C*|W}78E(L7xkm066Of%TH_#W?v45@;u%I`KjY z>4;B_rnZc`S%wHrIU!RU;gJi%WHnpDE#|yPi7w}?<;dC`kjgtRQX`i0m*t2BcgT?x zgBu(GNksPJn#f!2NGZ64H^-A0BVh6{9B}Y>)Qep^FYjnGf=1XSvH)EoJh8lhdB+LR z0p$2f1DJ*Y0-sv~0j~~rabWXEZ7SX@Q1aL;P-1HqC98deOv$1z%rm0f`&LKjbYI>y0hhMnj_{N@Gp z8;=o?spBqHAa)irfLb(#UC=s&@Um7B?%TG<$mys^53Zuf;3|qA8Uys^m4q9}CBQbO 
zOGLohM}?5VWQ6&dK@k9#&jwdf75AbF$y?Zj0Ocsa0g(mmbErYNnt_|~+66$qjdFr} z!WBR;s7{CLSFou3(SZ7*N33$upV;m5@`#3 zD#!xLk&Ef^a(<%D3NBR{sH(!Ms$e63+=xnG4wt#Ad;pLtEAd8JRNt)PLfVdoOUOVW zGd%7ZhgULt$1vZln$&0#Zc)ox{v=dPx=+E5_e&4Tfj%&C)Ch6P~3kh(X8{z=m?p zzg*@om-&B1{^s_|tf9ERBLt2$oUbP9(R{O<*Q2vxJ@UAKVYOL-K9|YLYVj9+uE$e3 zG;3R=j;pGZjs3<#SV$|EIO!5b$E-7g*1zvah$&=6rZC%*6zDKMMHTa>Vtjg=dR+ui zLa?E}GLCawB8vi1+3^e8ERr-7!p*V_3G5yjqElVw`%@Z>w{q-eS++#-?0oI*5Zjc? zA_*vO=|RKMlL-tJdt<0zCd;!M&15<1lTc}(Fd=xcoV&Xmm3esZAAR63+!g$(5~Xkv|s8^|ui>p`CEF>;^Jj&11r|DqHe@kT4)Vm(&8w1cZk15>ypU1o^O= zrk(&>J;7Dh$kCpVWh}Hgom~%4%rNJvMOisHzK|jCsmK6s*Kwx0`b^ewsF;Xoocsx9 zxy0yy3Fu!XZ%qcwY&80k$fmcy6Rvg*oQ=a(HzszNzd$pWGP#aSOK>=RNhCg8=)sp_ z56@R2{#w>Yv%~{^g~tBSqX+Zhfo&5_*{}iZ zwLt?^#!86{)uGZV*;mWSs{k2MEbtA^T`rhJso*2bPdO9qauD}M%ih@+!X224146kR zi6?FZ0`3OPssT_f=c3C|G*r@-R>=ww{(#+xz5iwJH+s=~Uec(q!rR$i>E&4{fUpfb zME*u=8l)OyLW9y=8sKFNuM-Tf6P_!&EApU-&2QkXH(y)g4xBcT8WoCy#Na%R1amxtnXOBgu)a=mJxT+?v0^Z= z#hH120K9Vb2B2H)#b%%UU~!=m#*30G@PZF1d9dh^R2(wf1spR}$`V%DGA>(&vKA=_ zLfw&fW}tL>_gwz+8$LOxD#*%QIGY1Jo=78$rg1mHuo(@>Rz zy}(c%^xB@6vD5K?AIhDMk(Y&DP{R2Uce?R%Vv5Oo`K(^y=W?V6RgI@v5Q4*VBUQuF zg&Qjw9QOFXD;Z@ONQ}uZ0Oo@t{8udlD9=j92K17ZjOF}Ila&v63KP!v2uSIzj=f+Vx6;Tp=}qdQW7suqWf0LkuvkO)zY^bmvyD==&Dg(usZ>(e51uK$BG zmq9a1P$^t)T&Wb+a9A0hG&sIj;y2It7XF6jEBL+6u;zO?o`9JVobMkDUxBn-uuz;{+c)Qoan}D9`g2O9x)M%h%h>l90^c#cuma`uXymZY&2qYn8Z_B}er2yns zvH0LdJRnAeL|~%6r_A5CUHT(pOn=mwah-LA1MeLmOb7KYOG-^ZyXM#IQ^aR_w4Ll5 zL?azWMuEdj!F@mG8Fb64==s9Bh%wj@!4NT!Yj_}R3S;MK^gJ-ACZkNpH8HRyg3^PC zD*%k_ul7zfs1^L1xF{AcXP2e95*Hkj!6Fb!#Z0*_&{#6JX1TP(i2}6O8aTPb3}dk6 zU8ZWgqOD6OES$1y??(5vPiFr$HU?^i@qjoRG#JkOVF%_V@Ijh&*{oNg(m%tDK5-vK zbDxRpaC|=br_bZUja%YA`K!;_bld&z_Hm#7Ded|QBS)`* z1Cfkg!7>%E3pk`XwcYXb+5PT8G|g#5(dh^yP2Y9-`9`7-XSHeb-1PanOp#s{NM%ID z{9AY$8OTG|I}KBLlS-UMvilpS3*d)0GRmuPCw|Lm^n_pVxUufG@!Ju1gbhS|d{bR4=u__#tHalQARqx_Kz~Mk{WauREtVEn9*Sx8)dMsOnHm_#OzDU7>3X= z7vc?`^2eWV1b05f_}D_18;bbKhuNEd_EvH>3x-XeEnFSP9Ji zsBN;*HKZ+TGFaLln=Du4ykw(E+lIba=}vg=1?y#dki~IEpjw=)k=0&EHg19o=fD&r 
zuffG37>zLi!%`|q0Svd!r?n~8`U_;JZ-l-{kpqs>jhj*1J-U1=)D6#{Dh*CdDQ(h? zG;uU2^8~82k{LSZtV&{paZ_dkbO4f|jKN9^PE4C?cGGwP;#5jyLbmHi5%eO)K&NrL zd`rV9S5H>c0mHZv2gkgr#+!($Ii^kK9XMcluubM-IIWJ!%RCNW)Ya)mHat@gryDI1 ztl@IH!4c--x@V?Prwro-I9^v~8291G>h^Y-ZME zFwUTShIHp|3t&dD;ehEMt&C;q=h;`s;-}zp%u0PKGgbvoJKJgWX1AA4=K^F~scUQF zehha(YYbqke`9OIp#YFu+ZY}2_^yr75s#caqZ1x?TaVd!Mi)3ZAIdXY;A3t_Bb`#( z8g2BgaHDIZObmyZoCF0&ib93q+7h}8sVMMrTVnAv0qGBSe|TsTDA2MO(|>Y$^`^%Tvz@oB+3JB&-W0 zRLcbT(?H7Mvy>`(gohMFFaR8-gEvfldQGG9H8LGuPn;etE2xzj{IsOA(IejK5z?_r zR9P`y=xpfigY7Sur}p9csLDhA4L!Dw^J5=PH8;wF>!?Q;<6(^S)h`BsYP0cN*}@_HD9@NsMpe5|1I9)^y`rXHwf z2^}7n(VCn+jb^ATuP2y0>bjw)F%ZjPE`MEulUeRm`lP49F>6NlGOovCT`waGD0Z|L zDB@;n-rJah$A;dExu0UZ_5G zaz+-gqxa;DCV0E6FBZcRDw&+o84c{{Yut@0y;sR9M_&atu!(9WWHeK^I?3cpoc-W7 zu!&k1WuzfO+;#l;o`&>8+dF7VQO12JU00OR5;&PQF=HU!uImr#`yYC&KiJz2da1wB z3n#_*^_Kw3n}qOFxdV)Qu#Tqwq7d>D}ujA!swm7=rE;43ZE(J_yLNm980# zmAH+j@M9|-8Z1Zi<6vW_)wCVg$?y_ky^tjk#njQ?!`I8TX$&#SQT^LPK+!hQ@gWky zQimF5itIMZmdo`60bhZPzS~K|_Clm79x7?wwxPyVz$tYYIMZetKFqjI+3YVLX2dCo z=%?HQVt42Uqiv!^ld?gQQm|O~s5i!*c%yMI3f0_dq{RqcM0wn`4(M9>jiA&BTXdt5 zmSPnh9w-XY19|Mo;~j1!Ay4PwAiW4lH{8g?+mzvkfyc7pMi11i+=YdAX}H91&rQZW zEUPUy0VB54M>iQ=L~vSgGpG%59?^lq09hD60=eR7;RsBQM%UbIEx;?q{@cWc9^S=Ogj^`F`zL;P(G8Sc9cWe(B*g?!p)eX6+)K-xC@cB40d z)8}@hoxTZ{2e3Ynsa60m$grFP%*~6p8)?lg&<_fvuxl=ug=7gl9h_TE2eKc!T~YMu7U-@!gILi~S>o?}7YHO;9(uRY z3Pfn^-NsIa%1Gk{wPq!~H_~`e{doqJypxnhL+&wJ^ULUGAPAJ-gC$o@iT4`4)H)|! 
zjCBs6x%V2AF`3@`jDeojn`H<}Z?stv_!VQm8Tb`zZrn`8_hAK}q7(N4$5#3;-Ur&G zobq>kz=%{3o2~bQhMrP-1aVO2)gNa}H8;x-^!S5Drdqp_racJhbm=Nu^PusKI&m}2 zeF*QcJ#2i)=%JQvri%|jQ22q`K5X1ztXl)sL^n(E&|XP$J%VF)GTLcLCCz`>$jV;# zG0w57IGZiwl}Gp;JMd2`Av`3r^`nnkCPv-vTMLYtq&CvaJh;`5;K=2u*Ol=%@q zV$6Nq$^w7hBgP~Z;m9_OG8T7T0FRekTN;PF4C7b_;KFPvML=lnOXlz}5XvYp>`5j` z1J{hi#Weji0&>G-3u<5~ zJBX7HtH(^*`nb_a-S4K>PZ%x9^8}dE63TtT=n{d;Xn8pF=o3J(JX-ODaTwy*v!fx* zmXR{X*pPF;hBys|3BZ|0ZSo2eoGpvy*Z~s|Yb+lhM`5P}wvErm7)?B-F}x~S#bgT< z1B8u#Flykap)VX>;n`0@4qZSmJZb#Ux#F}8f8$HE?h7q*=bOs|pX4!hsQdU-9{7X< z>}*^sJ56u?!#ESY6=yKb3Rv+R+BW}dPl0|b+x*}ClL`2Ca{L=Sr;;B3xA6vujPGe< z8tBvOPa7ZMP`X#;X?ym5WP(V%57Vu7o$I+i$&w-fsf6n+9;?Zt-4!rh9`u;g^$s;7j;_)5z z9%~G9Z*cn}EJGhpzmFD#Z5an1J&QgaXUxIJrwbrC z>>$EJ-;uxv0Ha|S;B$Zo=n;-^uo2o0I#6Kb^w^nzFxOf>;0^b6&@06J&~_&9YXU6O zc+CsECZbea;CGBC3TCyP{7Zx$QOjLzT_@FevGKz2KsFBnsk zaEJs)pEbaQgS&Z@TwA(UNvB?b46%d6i$-TU_M#CNi~Z#YM*2Yd6EC8r9W)>5n1Hc;K23SdPQ;n+` zVb@HB2KwYw!zYd&1)2(NqTOf3l4S}2>#~)nt9t$@)lN0mC>Lq=%K*v|s(Tr{^gGg~ zL0a8F?wc}OLwoIYQ)Uirn+9cS6ODN-HJQGiW(;Xk8iR?(SzV_OcmT}s#b~o)yir); zPON`Ej=xhz7p(l%_6AB zN9pm|M(d2ZOexBfT$@xYU}zug;bq$Z1Bekghp~CKQPyRZWm0Vp0?fV)a&v(R=5Mcq z1%zpf;jP<7Nwn<$R#<<~{BW%nVSNlU$x7~leF2o@U;;3~9-|sElA1}1DlAbBvY= zT&Y8n>H6Ae?HoghX|M~ks~z(*-X1Y8SmoZQnmK^=GWuhVF(~;+yqE1!&}fI_0ZZyo zWWPZ7%!M_ojGmqgx$r10nrkEtoKRR;Sm7A!bM@`TE-O)e-@%7EoC$RGwU5BIPgLJS z_)y{)+gBY9Cyc1RJMmgA3m`olANJ!zIbEKMdHaJ>=izady3R8)o#O#N9H#n)M$R)@ zxeD=?qxO>@2+7TZ9S^W+C$n2@DZ7@LQGi>2pl(|PhCwhqdVikr7}i0*SByLD*=80k zeFatziMTwB@cGy+_=@_?H+uE1b$cAV)fI*rmXYgi=6m?X{Ee>x`C2C9?pT=&@Y&4g zu_48)2h)h2f!y z#b7PpQLn|I$zRdKiw!f@f{z4%S@H(?7aJ`G?YD9K9pHz1Afpv;IRaR`a*ANKkUf!5 zln|0INejRP@|MkLyiH5z3HG!KVUX|h+y$br)g)Y2mywLIlNQRjfN|McO({RSW27dqxcs!Y1 z1|aq*6X{o9GdkLTDkA#iHE6XL$=rbG(|wj1ku9Wj7X={MrKc>`?{C;F;k~m6xpgLE zenQGc@Bgt3>dAguu*?u19)!GQho`u49S*L8K3HaS3&Kwgz>hZA1OzM%WW}) zkF!=1ODMib2$IuR8V`Wq{It?&RWHT1SYE9mq(;|5Td&J{*mT&o7emy-tf zv2gPgJ&mteT`Mb$Tf&hTVIHN3^~P(sj&ALGFeBW!{wB;2S+D;;*4_j@$|Ct6o|z<* 
z$z+l|xk#99$jorxms~2sg974xt**E3x~l>z;JLawQ6fe~2?$hF)Tk)g6*MZEprAof z1F{eiAu3`-)aar{MT~;_eyg76nIi#LfA1eZW@etJy1Tl%y1J^mx*86XZS;?4g4gFY z<%xjygIaQm0lP72&jw!&yqkv#>p}I9@g8k?7KPuX{wsph-S5)_D}pDv->*egTiyCK zS0?dy4v7aPt!d5u@+1H z9y+WJdeU~9UKbqY**}l|T?g08Ui#)$xCT>J2FG}J?tx0ul?J~Ooa{V*6wf_3e@`dB z3=FS@hbYa=?CuLFxvJiG$f(J zCGKstXRQSX26ixy)qux(g2!HbEqF%S4tTcnXz&UMjRCI*mG9>tB}C&xI_dRbp>NG% zY!sy#AJS#72YU}}Tn%xMjYt|>*7ZSM``9|nb9Zy?7wdyF*c>@$L-3u#8t^}_*G}~({11FBAJKpQ z6D-McATlF9V<-LipWw*MolIY>dVXws9<~um%}4wIdIUe1xiNV6xW@}3MaF{#a=_Wk z-Ie66Aj7i-yyxQ$6^%}*KNnATK}TrdFWfX#BeVVkq+I%z#%+QS|Ac02!n}N*Ufcw$ z_!By|5oX3pn$;K_8k3`|8iNB1zlYwr|`P@viE~efvD$f4{nD6IQoO2D*RS9Rd8&J{k0)0Vx^TX{U8{n z!AGU1(ia~fS^?M7ot*Cr(FOUwKqq&=`serCU*v-h+Y#)Vfv?Ehgg*?Q|0jQ1n%A2M z6>zy+et#^svbeg%bccE077imER3(#OHD4=xa1{ua<@9|s5KE#y;{u1Qm1{vvrgCht8FcW)bk%3@1inX$KMVHgBrZctcXsx(IAPYw{E@bO z77UMoJjFIngfGeS7*N5GZ;!8{&N9m+brWA_XkcErCZXKtuBoCCc_!1KmSAv5>+`ll z3Ws*h7UxH$IG&KT6Cno8&O?T1dY-Ot!3^F{FSZ0Hro-AnmdsT&_j7D94E{WrmpKMK z{8gT=wVq!6c`zrf51y{0+dmH;%I?Q~pJP#O{TvSCN9m`}gLNLzeZ?189IG}_$$x{K z=K1jd1}}zGTJqoEXZ-fA-NBRjZS(G6p?bu!PZ^!o)G3U1$`o@yr`~>)j4y+;`2NR* zU&50-tBv0N5`tOO=}t88E9{>wrW?KrUYhZ4PQ=OsNLPFvJc@>VjYtBkGfOC^HFzA= zeT_&Mq=5eTYe=<6D5Di>=6t%M74v!tJ<%FG8!|loo8ZkUgN?`N&ToS6Lj+y?Eu2wX zXu-EZ@@#3N0pDT2AEM`&@35wR-bT^y;8om9FMJoA+!bN~QWa-2t%0->0Wy!8TU~Ib zg!e#~dY&%cgAK`*1ewFGszKjl3#gjj{2l^zF=g(B@O_I$>;se{vp_F!e&lu#cBN57REec?+x<}*m_w9I><5pqs27*MF{t{*^^P4w&!u!}d-H$Ma?VoPCE8(dU^Uxa>agZN%dgMY;2 ze2f12BQ(W-QRR=pt|J>U=`c6>IC8$(z%$T_giaSLMSc`BTH~e)TGD(rtcEXt3|^Qk zju1>%+wF{n==`68m*(#jYgk+=kQ-tR)B8VRXYW%g*#~byHI3N^kKj*KyARXxTl#n( zM&c3bx*vMSzv#mK!HK>_?9AY_&QbKm22x=*Rsa_C^~0v-Gg^=isR&pXPb6 z$}*wSVbj78F~WXKEV%s;WQ$vpzx_GbJ=5~Zqo2S${7djK-($RO;75MLi+%|XJ9J4Y zyh+%KhTSNpj~*_}{L+I|sv^;l$lLu>NhkX3m*DhBE0igw7t-Fluq|q~ zIlp1+gM4P`OZ1OlgRk_2TR48d6dTt&%j}3RL&iVQd`pY1TrSl2q6|9%(6 z;3yX^N2+IGI3$31)UD<-3wu<=mJsK9R5u8Pn>{KNgdk&gDNoZaoD6g3>?@az|ma|jUUeC+>>C`mED$9a2b)x5yZS+Z+Vn6CVUbP?%z6!8R|8zAp 
zoBcNM+C(~=ue#u0n3k@t4LJ_M2o7c=7TIU^N4j6J+OwdS-fK{Ers|x*RD-MhvmAOj zOZB8{+;n%oU-g3z<4(Ui)AEVrqQc#N zHP`dfy>w%yI?*cguRdUpuRgeY-REEd= zn$`x?p^#(02GpPP`kA|3Ap>bo(*XrE-zVz`Uib!DBdyq z$xt`IaePg#>JFmapQ~mEF!-GryeSPsKOhfJzH*sgLWpvc`{?C7b&D)~T+6C_bxslZ zG2LuUn}WMMvz=u+2LX+P3e;!3>=YNOZwEWiEIg^6}wk?Id&_C}HFR|MB1=&-;S;ahh^$R<=V zzgS)7d))>8$i}uvdS}cdyCxn{+JwB#T}8qfHmfa0z}W%n`AR82-4#>?MZXAdEIBBc zFWnvL9x8%;bdrkWjq=%^#Ri?MRK9}&*O^(9F(rb#V?tr0lxj4j6%>38#<8({v5V^K zx0+n*2Om8gRs)@POS`IUh>hpFs!KetZKP0%Ix2IoFH$IM4aPOQsR0gb&yX6CJ;a1W zQO08!F(eWpcNaC-c@JFDsb5(2?D8R;s4(@I2e!yc+2o!$1KKm(5g%-%%fqU>*A_07 zVKve!|EO=XW7ymfk%!Ml)Qi@;a{8g0x}eujd65ijzKNWIUFbGt0&(~*+Kc4KM53Fz ztInMtO?QBS2ixcoKQEz7qVB3kfy}1*6zgfaxdbkOQe45PhQV`tVjpz^P3@sN4@uDW z(xL6y%X5R|4TcCxonj_vjJTfRd3vsg8tk(q&aPB?xR*MnkJ$pfOvI}Bg`Bg1JO^c( zQ=vj*>m2h~xNoO0ss+T%8Vpd@OTkW>E%+Ekw zX}3Hr)-K)GOHIubucRP?)0n;0#RdxqA*CfAmT+~4QR#4H(B|GMFTgS)M@4d3YEf%% zbsiW2H2bsX(R4bgk2;as`lwupC4XOaRBlDooQqSm3LBu(#y&c~uezJH(y#lfLC}gy z`l&lHL!aoU7Uw~RMGSXkXQ zAES;X@uOkt3|z3M@~C^@A|5o#`-IaGCq$Xqs3 z&FciUDH|G-NM;k#B`*ZE!=U9O)qGku3@ZhQDTO8;qPqC^OYfHXBN-GPrtY-lIm-gB z_U_@Td%6X&B8EavCLhCC=4D^WM~p^AKa(eKj^gVm@Twf!B=fV_+GQ;2kTz1DVM9CD z_?a?CswxZB`!KpFKA+wlr7}U1Pe!SM+@q7msv3H2wCYkcKWffP914q{x6sRn0^(nX zsvHc!%0tx$T@yrj7Orl`1_$&HzU46+hXdZF6UVAYtx>u7a5aPl|9yw6PyvX8lb0Zl zJ)yA~7o_MB>Rx)k7e?^fF^X-H2gay(Y$>-J&Tzyus>O8oNM*p&gIt>PY2a8Df|-Iw z>fDQKedAP$+p}yVWtr-zEQmZyvhYTqRkyHv`gj%U1X0CDoBg4JP$3o;OZ4sG-aTk> z!+*!C&N1m#8#q$!=ga&uCaBX9nQ*}bNaCM~Ca6jMv2OFO6X=8E^4PgV3>4-B65UzZ z3}t`Jfg%4y)%OsDlkg@Uls(Iw@tB)k$12(T`URf+FL3t)?<8T(lPx35tiNO;Ce}Mt zIZ=)E?11tV=6(Vp?%s>=Fu8HeHOy4zdEsD}&*$_y(i0C3qDO^KCC#gu_ z^&Fgj%)|>XNO-{{tQVx2H9Zmo6Mgk4UHCf^80P8;D&hcZ@>x~?yXC4qsrSMrL z`D7lW#&MBZ$4HTXA9Ii*5FJjDZ;t{0-%R%$tBxo^?HqhIz!fk(lK^5k0ZPCy1sZbe zv8rd*qqT0_86(#YHWyI0mBO@euu&kxv2L<;vlDW&;1cbqS?7>NYUHx|O5k&(Ey25$$_ ztHIi4#fozye&^h(E0ewYV3k$@4^F#hzTKfpxvj|bxs z6H&+X#M#3Y{E35u3~13e1-$U!6Ajik37m%bl7NfM&&Qv4#H49g=@arU&CcnX)$PRd 
zPr9V@S!WyFd-UwpyHDSK{Ra#jGjX>-z46G~i@alsvi^Re?pw#qS&4{-vTdVqNB|oN!XtO4#HvK9I$Ga3l?r6vC7x zSt-?@tVZeBa?vMK{<|b@{2hm z%*OfHen*Q>?t}fBu$L|sfC)T*3!=X6+}OCd|I89ju!jjHszbSiEIn)&%L9}0yA5P=-kWM&Vb&5i2 zOEH9CL9^m>#w{2wkRxA`67kmOxLIN31m=5)!#>t6MAZMGLM+dDeTWz0fGBP(`rECE zq`-ZkEL_bLB!A$koo3H@h;OOTs+zsYh(ST8m!Q+KykZ3afbbWV5#cZNtg#@WXF2h% z@rmSInsJ88kLHz?@kIHjkk^IHD7b;q7>CY*jI#X8V#rQi$5Cu2#}H}MAWM_g;Z$?w z>!rNDelF8s13cMT|KM?GD3Eb0wR&MO8#{aV=R~;|W!wt@GH5CGbQW;eA?_-e6w-CG zr!X-F-sT_a`1N>w(2nO@m(%iq=GzL+AZjDQgAriCPpBv7c7a z+&`JorvGuK;& zA1LC_x|Zn?EzK9&fx>Y8gX?*J39|lB*Hf(2;Bz^Df-)QY!7g+>cLDn!I*Au4RIOVP3DAVY>x@&qsoLvZt=t%`gaaJDL$#52Mflz?rd1;j`b zw|i`e7()xta3Y9Z5#Yf`01!b;i_TWZM49Ai#&>R;ThD-9;0KpHi z3p5~JOmGkTTAX)^4*)w!4nln4d6S8pAhLPZi+G4$$u&bhV335~0P3)=f!w~VD=qmi z5zwGIPb7AI%LIhTvpEQ~K`1}EV`u!jQWka$0IJ731OD@*H-(7hh4tx$nwwCFzB@;a z9`z6od^BNl@g)pf_%pz*xTcQqV8V0Ts7ja zOLd;KOcaX?u$uASo+a3MHA9y{8?0gI^B?YOc5-Jae1$6ZWL-&5-K{38YNuydU!-RU z3b1=dYv!xsbo2ueF)Qi9MXG0%#SWjGYB$IJmw>%0zNA>xO2JsMw%Gvc4RA%lR4H+* zGp6RIB#Ys@9!`Z3QbN)y3&!8*p;b8-!B5I9%OxPk6R%E5jH9%Ta$1Uu(`T?n8OE_~EI08Rdg4p^i z>o(Kz$YLi^ydV`EDR@AYG78M6v)qc!Hp~R81AcshfX)scqkg%KI&Z-;XC)aZg0etV zgz$zlama#mtJx}(qtuI%EFF!ymvF_?+4L<&HhFHK3hEYn^AOX6%(95~fnc@r zQbCjE=^{(4nG2)KS^SuTn6Q7O>oR6h>B9krbK376PPlhuw#jh*)@TB-<7l!fV|C|~ z0}kh9f4EPV{qE763Us2ZEgXC}83fY)cMqpEnwS0l(L6rZ|Gf6Y33~m(;haYME>hWZ%8VOn-zDmlBQ{H;t2~L0e;BObfI3f@x18!xMF#e3-WI^MNlpEe?z&Xjt&WWa zbn`-`kj)suWmOAR5sm<>BKI})dH7RIt9{xZf&M*sTB^5X+H(c+4_B=M-O#+)EH7jJbHG>Yh9Kj?@>B7rZeppIXN~v~2qcWApVLC0m98nW%>GjK1x18U@nbVmw zyV0drV1G=!%Q!J30$f~DH19lBlx8s`Y=qwQWx6|}7s zxQiLCiYCugDq1B^{?1RT+n0PIUa(rg{>ESk&jJn!4b;TJY6R>e2IGh>8!Vyvnm4P|Io&)T-h+R1R6$7r9ivPl}4GFMI5Z5T8!F)0f+aRzPGHhc4Y)sKN zpNhjq8wL0R25w3Kb^=XY=F@TDCV@SlVI_f;MTD5!!DT)Zhiw+v^BA@zq26VVlrf(b zSlm^EVQCRyWIUG8vKS@eNIV~}xK&`W5e0FOP;tC>wQ<-sfjygHOA%e+P&f`-7mWj# zdYKc?V&L)v0u&d*+)dKq`hOJ42jUtZOVxX7ez%>GV8UxoQ0>@xqj>Fao?5PY}e?Zv! 
zIBdPZp2Dz5Tq(m5#WYQr9&6&j4FY^J12-P9;#cCZjRJcT!!{id_SHCSlfa(Hu+82n zh)%Y-IL;JnI zJ(@u)+k?KD3|c9mlNc1K50h)%kPKRtJ~9eyCo*hxd)WUZ!&XbR6Bx9nJ?O?{&>8_f zib3nzgJOp)kp^{9fjyF8>n&Kb-Vsr@n`14m(v0g53>soRQ@mykqT$u2!(r{XbJA@` zD$Yq`^Fj*BJ~axmE5cGq8<0H_E>&C_(E&(;Ahw|1xVxhfzsx2^5AEV>BTCna%D%k= zZe|;NaxGk-*J18mTn)5_rt}e>X8sQUVJYH&KDD}l_UYd9R8CqmDio(>?`W$~A<1ni zBDz%wHJ&5BQxWlP=O>l!$(}RbfR*FqPFNLI#M!e=YB6KAScl!>)HN+D4%VyNe2!bP zqV*6koG=6^4(?_cSbG1#4rxpcD3`+_KLDT!KWv_)y42xn_L4o))2?1J`tY=!RLq=`K|P=@j*NylYKqS zD4n#A*(47->|-`EMlE;;j+6#d%~yCIGt=J3-0e%WcF`*O)#4Ltw^iUfj?SKFQfIgf zmudwyO5p+G&CUHuRB2-z%5kl@+YIAd8>sT7r95UNvJ6*bRxp^Qf|lddU~H5sGFWDg zVUSAPHhprbDxi|6%FL>i7l-qUD*O4dcz#HgJU@(|S4*7<%HZ2><6B$6Rtwmn3|5mt zcSKb%t41D;=0|lI^fVqFQYVi_@uT|om2HhzRxe;98LT0Le&xy<&M~CpEW*fAEL7V0IFn-=*KYx**x5)FM{JhnAUX*QoXZNdBUJl`x zZPv?9xyF~aR@){o2lLBPdaG0wb}ERyEA=y>2l2adc~{irHokF|BY8QHUslM=;&ep%_KCvH}qvnu6Le|}Wur?>DZS|yMA@gu(CJ+`1>XX|@sOa!DagVZE}xMCnR z0@8;;IMNoim@zfmc+GOD_!F6nL77XVb+}AxEnL#^Nn0-tYOci$qCSppwS>rq4mFsK z(wKO}8`xv-2$4p?r#Iu%bP#;D`+24v4;x2hF~dUW`(>&%w=3g#?{_6(z(u%FGrG{q zUGUOd0hNC=hFGviIJ(&a?5)YQze8E;NR4gZC1PKi$z$G&DrTv0(DGy^Z&ZD;2>Voh ze`BA@i5l}b`&39%+hLzdVgh?Hjmr6K55_EJM%iOFCo@qRF(^WcGA-YmUBpoVQ|%%P zG82cHi*TU|TqIY;-yw_^^QR-Cqn=9qG^@Eb6Q^p-%1nn~%R1C#)<~l=+|@W;92@Q$ zAy`kYsm{hL&VJU$3$97#t#Es^85AD9%y3tB@7H(ei&bWACY6>UqNpC&Z(yu}eFHbr zDCoXtdG;HCvAt1~gt4I06vwzR5#uJoxCd9(Y`4Cpoes4thOsbc5Bla7#>F-$9G2Xj z)FxNRJ!=8Rdu)tbxsf)(_b7JCrs{v039d9TG7v4KP#o?asB83ydwn&3t38 zBaZ~88-r8?sA@I>p(^ANhXR&7r78skn?aCDNg$nKAXNeqVvuSZM4Wq;i**@d z&EsRsP;mML!YoIkH5PbGr*Y`E;s9l80)j-HUBvMrI!EkQ1rk}?YNjsmfAGYHEKlqQ z&^i-5u{8fY)hWTx6|EOMI;xfRLW~k7MngwpY)I-DLm}oNAal0yAhg&LNQ^m%i?BmgkU{uCE34WFC__e7nJvxugjBWY+GZmQmd7`YU%=Mq z_8$P{wm?^=tXZ06i6&<-oI_}3bn|U0KUA8+F^>UGNQbbjD=W0ot$25}vNN+jq$5kbZ4eB2QZ=*Jsh9 z+f>f)aoy6g%b+ZmQ&% z?Teg?G7T0*8rzj|h`S|;9%u#z{Y6X;40QG%n*6lkh?TmSBD+zv&_~Oi<2&>2up;+Foh%-6o;Pz%I zBAazw1SU2#&m4s7!j|BPS%FUz$%COa?zEH|qz$&!u*DsUn^n?MJ6The!#v)Zk*{ut 
ziG%zy8~LbZBrq&g3k&<2c4ZvwS0pvhMYvE6kY8mZ&wePgUXcITLcT7B{Mrts&4!#Q z{a7VS-i~Y#>?M0fqlJmhhO}lbVy*cFW=M{;h#v&i?MY)}6=}*5#%pd@#F>t}4p5{y zN0#cAgd*cCk!DGX-AR42nrX@TLvz<%%nSEeL;!qwFTwyeE>yWxc%Qn^+GXu-@3LY#)#SI|WsOz}IvuH5 zY4EJ+lt9g@c8EKZXGv0DQ352bGK9OT+ZA!<&azmMD65rLnQDuqocEiJR7b!#BwZ0J z6E9L@k+iN|5r?F!l1OT`QkP89TsiOdKS|n5YNkca-oHuBdcmY4HS2|%g`kbuU?Us1 zT&>x&A*p{1iZ#=)6nd)&^Kq)3(Pv9i*BF}Nqm4Lrz8&EXEIXSx*ru6_nq6RsLpMTB zXUl@!)uFW6Vq=b4nvvT+W^=a)<^_y-tBtucG7bamiD53I$*}NCTBx_#Wh(65SZ6WZ zpM(Jy;X-XdzdYAMzl=W}|AD6zzs!nU?u^E_(p<-Vupl>K+C&0^XeGn77o?Sl+rpI^ zwDAF@EpJP^0Ir1uZ%g<$ye+>!&J2O;ugVoUUTqag z9Ped`-QETlXPsP%ky|<7zdvj`?=@c z{$I=tpMkq;2e``JG-I*qbiip?Rf)!W{l92z5*mZh5b6v-f347&1xb_UN* z{$J6d0<$jO;hMZqAkGSPdGz4JDty5C{#7OY^2q=H37^fKtv^U-*Cyf0pdy#h*$qjZ zZQ#x}N@s`QTSKg~TRK!=HpM&Ji1S0UXn_uFSg{VOpg^eAAv%FWKq_M znF+Wd`QPvS`wyF8-W*CWn5$F%qHyb$)@c^3yIB&t;DukQX;;=&4 z=Nac2mxBMT{}56T4M?Joa!x4fa=TLynUAwg$a5CXqp>0&tUZoe$A;3loG~*q;^* z&)noT-?dKZg540amV8tt>KvLxx&^KUcVROg_9>fGTo-^J6^TbL9F-qOQZjk$;7-fu zFcPZ}lC4HE?F3rr4zw~kT@zCk6VBKSVx*%pP>zq;v;jFB6E8re&H*`VJ>QLYEdNwEN&{^9^h1gkHIPD!8`0#=w}at5*E zNj>M-uiG5Rlw-o-bc^w$5Y8D5>qwckpNZqt4&kZ}c;p>Z+zZ~}v*8z37w2c&XTvk? 
zXMA4Vd6tpz%$-MPKBfjnbIU?`Wvrc~S}*Mb*df1lS^D_U>`>nL!L!Q*#v?Gk_$#Nh z$ln4^t~(G(H*wj$f&1C{E*3ru!feb5aKIgSVkJ?<8_sx{NiRQ+i24TF_P9C;DMouQ zRsV@@c`KId)MakK58iwAxUew0P@UPF_`0dX>)na3cXfE(miW50!|Srmi~2cpRjW5Vo!5SZlPQU5Tzryi3CZK5PpH10ogdK?PpC=Wy*OQt z%+SUY>U7U1$l13{ajN=@mjPtfPP%8AnuLsz?=Mq_dS>sWqUG}Q{N?H#&tp4j^>Q_^ z^GiF?jz4|^26>;;KoYc*`XnlOb0-acQXOTrlSa+U)Ns$no%HmRPTM$@`2^e-H!Me* zquV~FGoC`7_k7&0PQa6;h);}dFHFIi!$s-@rWi@JGw~ZruAclj~!8?P=+J)-&qt&bt_TU;;EB zeG}-Lp8@p3U9|ief&Ts()y=!`M`TT{qOQ;4_y{hkepYok-r9r4)|jk!zA2Je+deTb za*{1*Y@$UVH;lJsCKg!0vIdUuXs|F#1U<#rLQg-dE=7`IwL(3E6jd8nAX!*quHgvv ze@-3dewZ$KPMw~0=iYMMFJ-oVUv3x`wQoMBu5o*QnMFs|s-Ye#qigxIx{RKvRZk*& z+F5m~-?Z6&Nr1>RW&{H6kWXp^4)Gu#0eXGgEZo{$B(uu+DmRR!#F|E-w9GHu?o^(5 zgQrGt!xCf%o5v9dAe7}qCgD`55pHXB=D#9G>FYWi75?waS%$IM58lM;fzjtfM3NPl zWpBu3+?>JLIYMU23^~4Oy2I>i^_a_$;|b*;Q6cU5LA#1jsCa~@H|pxm)qI-5%Ue3@ z3Q53Su=a7Ce1X$oPVMf_{AIK!Bbs^>y3tp1Ek&5o~EeT$#NWi5l;^|-$tyaRc5n7cR- zz%ovA>yh-#PcPN0VqmtdUWE<#gsg7kpnm$qke_^OR3A@u9#zcEEzGaV3%UCsI|?qQ z5wpUJV*zKcLDtYznzu$pJTR374)(La@hvfI%X&eT57ww5QP%NsA44ANi7b)4Vs?1h zNaY0We91uw$clH7m0sTpw{CVtMd_9Q5F0PpOt)D0&Fzg5PLoH`r%#+ybl<4 zI6um-oA@=9e6s|va}mW`mw^FGje?6%eM4R(&ssH3$NK3F;o`7zE>I=dmE&*TQX|Mj ztImt0G6;^D;{xF62kVYZX$95M5vCe8d3gvpN*w(|6QB<_WV{dh zB3}#+nahkpiuB?DTuk2Kp3-z-{={H$tE=?P>&lES;B)CISB3o$T_88Qk{(XCIx>+p zTo49{i=i5bL=1D%Q&cOF-h#3^pmhL(4LKqhCE`Kd7Ez$Wqg|?b1KG}2 z;>LI@+XUjKV@p2-Bl1P2%RCcZ0HcNJ7k|LjTe0BGF&s55dJx|$!)IuC%%l-)S>sVT zvM$FPYDs8lqO^nwYXwZigq89tqv@a>-mY7tArNh>#k_5^&vN>uiRnp&vj9B-q{vtXxvAqs1~x3g&IpMmRcf&Ab!lAkXkHlAi-?OC4wso!H7&tzv5qxCAl#5AZsx|BDvVp znQKd~#+c-4$P<|{$WbOL_)TuTbI z=F;WsRhLtm5~6WAsc3r^X>CujmN;emD z*_%jrn@O*~2?0|<-@S=Et$ymfLG_4ov?F*lgwIO;#IPc&6(i)-gzt!J#otQAhgwT> zvp&Uhr1_9bVSs6%~jljqY0Q^uE&!CSB95^TH#m5wG1za z(A}McIh2YhIHLs*Lz$Kdgd(k|42dJSQkSu z87qo8Vuu>0guq#Re5fOHAs8}wFcC}XU}_8R_ycaSPQorRF*A4(XY~PWzRGXh%x)7# zo}(I>W;maR`H3H?NOf|6mJtV;+;VHYA*&2j^I2D4EsV9Sx|p#tAc4))@4Mo68#Xd}#vFDGz?vRTi{WWOlZ z>c(uk`85@sz_D16JHR9hN}IJZ<9Sa6WBr$9@bF@Fs(XH8Z#Aem0orpmQ 
zQu0_JjLD&kH5P5zu?Ji~s#&;M4T}05zC|Pw=#7Aqcw30n%&~M6JeF?aGsez2oWdO) z&ESs0;cKxo^g(>#^j|exA&Az@pB&S|H6Yl?Ce$9>`;8RvN4~9eR3cl@Z>%p6S)3}3 zyY57{FdH&rNbQJk?PhK)1Vr*#DPPV80Z#5^b?LGfKQffVG9-^1PNk`VHvs= zi(YANXr!=e8@Ey}1|~BI`j3`g!oxxCirjXkg+R5h!1jAGjj)C9JYX7Gkp$_sq&DlU4P5vKDpu0Oci^%}uD`K;{n&ALsYS^WO3onoXY@e4d-KK6y%~@)0 zqHngrclI?k?ooZ|s3zPmGn>w8f?MJ?s%TP^(#n0SBRO>Wd+IbA{4Q=ex}8pWSN+?4 zEB*Yg+J*G%JKs~MxM$Ju_f=o`$u4*wj@^2C`hEDx*3jnn)tE5WU!Vbx5(3xo(LJ1n zCv4Z@V%CYc$~uGkZ&%&YSo4U|DcjYMp>9u_Xd--bQfdTLcA2TzSaQ2SUHpQ0n!-Ch zUcR0WJ}k_BT8VOFo=4n_cnl`sc0i^FV8a;pln^iMupS4vu&V}9jT*@0kaHq@*?_jAbdBl|o^GIPuhl&GhRcc?L*{aN(* z4%O?B=NSX^E=}$S0S0_$TNx-g$lVHJ!1pF^k}Gn5sJh^WxsnfYMbYDQ?1xbOU!jZf zF!S0$~DLwe1`e*dridzh`ztIm=5qEn7yk;@UUhlZj zTqeZ7k_!^vX)Lc zJ(Rr^IXUY@TvnyU61f_+XKPJHZzFNVYGFBo1EtcpueeLIR1{g7lXR6+%+< zs|~DatEv$FT$Ay2%2E?Y%8it3OgP;Q&>ss2MvI>NOpWVZ29ofQmEzMZI@DFepPYw* zZ^kY4Iw-8AE-gx*V&z|8nagDw7?$%ecz6vI-i7~J{5Sl%kzaG>1&>jWCDQ3+6F-Jy z2cCK;hV*o{&X0oRJUfJk{8r^H3jO1(^acXs8zF3K>giIJoQgmUQDabdiBM z6TmLJU?aXa%l84U?dgz>k~jdvhVo1&j5G2A%n>@b#X5+4Bn2~zItth zzRjmlt2$)hYPl$r2ZN^y7Hbh7*6LmD$Sr#dp{AB@X;ppTR$11H98Ig@GR|$TsHGjP z>Yv@7EzHbhS^yc>U`{A zo&K$QEc1yHk2jjeLb`^$->IIwlCvvMR`LKk$@778*~v;i@jLaD9|_3#CK60r`t>_? 
z+KIF0L97oEvCbQODc3}RhlxzSkVWw3&rnk$E`TGuCu;@iGp@q!b^zLdi%nyZ3p24& zviy}Fv}6xf7Euz$cxL++?NtM5%=fAuO2RMStM6Dz2nT3P8)y@t&=2tF1Zd(9*hdV| zoj)LXcOc|iJ{$HX?*v7!oPix>_pGuAeZ5ol=@uC8E9d0jZ8@_-f$_!seSi9_GJNli z@4uwrDf!xPuhi|#CcY`d+%C4Dpz+a1KdL?{ZoI#^O@%YKkw^fujOMket|_p1=$STL z&lsRRZK~&>EPidwmJ4JOvB()eYIZ0`S{mPbcG>vBnyz>vYmSb$VwWnm`Ee{_li+ za3A!wTj=Y3P-JFN|NU^u)zg{#F+JAM-2LjxXk&`K&RQAKAsDRrkw7H3Lhhgk1rbWqw1%v~rcL((amsg`^FSZAGk9mi<&~u@H^U z_?9i}3QP>FgDA@Szyge?th6uI~Ck?ob*$|SD&8f)W%5YKeQ$1>Ll-dQd zMj)A1X`m5eiP*Qut7sF$H1S(8G@VA8;k0x!{mo{1=cuO6dnim8h9uS0sW}BXI;84W zhGCT}d4U!kij^!-PF{YBKHGg`JxequOUv>Y`!P8NxGK{iU;u?{HPS#d2C9+@xEhNE z>~4vMAFbxcF(sInzci4BhXQXBlwb#CtcR(bJ6X_X#Yys=Vg2KJ>{TASx#<=ROH}w2 z8I04Hff4Z+z9^#_^uj($+Bs-Uq^*SpWG;rYF-_dQa4@BX129|gA9rncnic=%k`)+m zEl}&KnIf1GRroZkplZm#!S=~8URijfKF=LkxLps0jLLEAA^ig;R+_2H%fo?`aiB)3 z?0bPKQ$m5mUAzjWalU$%n2<5k+`3PCVCf8&)C+IY=c1CI+#Tw=?SIw<2&pUIu8TM5 zVYBfvTT}bGN0+Ar>XgT4U1HVhrL(;HRBVqw>D9-T)bTnq^q}Sgbxzl#1|7^+z=@^< z*$PyeuIA`s-W%dj<#yY0$h4 z?*@TqV6FIj80wf+h&-411mVdb*e}Fiu_J_ktXsLk3`qF=Dx&?Mo8tCHS-El3jW585 zonW&>xl}?~wm|^9I2jf>r8)4WQf!b+wv6Gb6_XgM6yz-IWbBPV`a&yp!F)?MOW?BN z02yeA%karToYnjZFXa;cv`mz$G*0l-!V?FdRhEr{=65IAC^%^ovr*FF&k`F&j<3M* zY2{YL%4u&lfRJLsfKc3km<>+DrI`D?Egnge07tePI`xGylgqn1A{?Z21%p*$Mj}|! 
zF#&*HC4G~v!v&bHi1`-V1IWw!m#AkBrq_LRW{w`1o-^JD-pQf6@xz@%t8=tU&EW^z zb9A>pIhe?)Q<=LsjtvxWBL)_60+uO|1BHW40h~Lbb8~dx->$6SK$X!BL+g>S!4Lx} z*uiR*W0E4#>iBuR^sNqm;pd5I{^()@4atQmRqutnM3j9Q%I#c+b$xs?L#)M-r!D*) zpD@r6xrmx9=>>y|!-MbhvaT<{O#s?u7i`4$X88_XALaw=`dAf&2ae2A4XXwrg}PWBGoWQX5hSfJ?Eb`=-50R3guYs4Qti zsTEuc4yspiB{{giSeJkiJ_J-QH_S7|bEvCS-qvR3YwdjtJ_K!kTmtv0aSizi3KO zAL(94PX_fNY0Ix)YL2Crpq_xcjRz_{%Ka=|tnh0EEmwLfK00e4w3Wtay)NljA3Cjz zeguzx=%V|&x7J3w>igZE74Oi?B@lEQseech#>crKJr;MsKM>MIx&b>YA?zBSG{W;| z?6EZSg)e3k>qNM1AjW#~9fW=#MS2ukNsAhE6#Skccy5r%CG`YJDjR;OP(~h-nTn~vZ0v{r#sj*q) zJZ?FcSnJRfc59>~f)4b{G&a&h-DNZyyKA%aCd27J3qagD9!Ud~$7rCwJ#+-Tb9@hd zDCk_y9~&vRXA(uYOu0qW)*eC$1^|cj>#5JQ!4FC3oDKeS!ySAP;lnA3ZCk^JH4ek} z7H#gS&&hb(bs`oj+$}*Pd+8pY%BO43>!rOO@ay0DfX_BjMIU{y=l=VtUtc{WZNa09 zjPZ0~U){xnK`!m92YBwup~k+tQ(EsS=GF&m*Z0*h=|806{`%@HdvA|7W-y8F>#vXS ziu-gu?dY$^c|LxVLIZS9&&B|KvrZS#1%q_@P%L{6uR>q7ovo>Z+?T{XGy$e{s@V!g zb_0YK4$v2RURz9U19TUCdwwJ(#-|)j*}yE-;F)5+xtN9w)V-6MqiY80Y!B*~JrH$l zTuh%2{LMPHbgZM$>eFnxVUX_1Roy>G|HV`B9Q`^-cY%Ap%V6EvL(kEu!MenI|8uv) zzd?gn=}vUzVEqrz3s2EkgTZvWD1C_jJIH zfcq`%bHq@sbMN!x83q)8L1-Ul_SB*J6mZo`LopPaC@>7n+(`Y0X%iKO53!ibsSu?q zE#|s^7+So8)-(7w^u;he;^5%h9-tEr(P02gPp%J&zSIYR2jT!=)^`ul12XnAPo{t; zX~rSw^IOz=IHui3dSbXtmhHp!5r@Pl3(J{?_++tV+0i3l!&y=+)k+429-lA*6JtGH zF+v}Xt}YwVL0&;lfo38C+sP|X(`1>ccz@Z_?ExbtsxF@_xIDZ4Y2z z6Q>(H100b%cnSo@AE`|UMFpd16P4MNNutw#<(y9q^@QP{;aZYt9u;RPtobl5r?!}<-ijkf48{7_x;M{3yL zKsO(%y8`Q~L-jqt8mrR46fXoK)}>xF=P+PtEl%0IILTrJ0cUKY^@r&oMDHhuNe8g} z9Dxo@I9!kN?&fXOCK@v)Nh~sVK$M!>U#FVGA%>oNojyEV_bB)#w%9ljODk#07_IyW z!#3M~vo3HgAVVx6ju@kh6P6{XE(=1XkFrKoz`)(#jR6Med+sQmPsfbW0c%MaPS+lx zPl=+ZI4OXmXdK!tw?L=yRK+6fw_X%S;4TPx;OVnYaRCTxjn{ZF8`>J2H1^N0Q^wc3 zPKW)Ax=;`swD#fzfKeh7ID#Hc400cZP+xX5j>3){9X(bTxHr?eVUEG6b`D5r9qPj^5(TLaY3)LkiT z>QT-^rdqpl&)rYAnL6x11Uo|PS&Zoe$v{V#S~+kBs0k}}kMXi%PZ+OX@w^qFK{dK0 zXA_6VfTUuQ*>Oyz`*ZP=}|i9S>8a4k3uQ5`|?pB(I)yCkMl8b(8Jl6hU0+p^+oDCK}W`Y zVDQ?6@hO(E;TxF5VatlT>4p@r0eF06;xa?@=I?w1C#K1q2@qi$Y25@p3614W)REAp 
zfa!*p!RDF!5v?x=%+h25^cwfmsS|ZCjNuIvQKpezov4pO4ZcaDM)scsdZO=>CqbEd ze?~- zSX6iCF?zTqC7}L*LSCq)8;_CB0ihx<&wli5@Ui*|&zo=3!^c8h*+^@y(nWrG^&nU( zK#fO3vv~hFokfF>*WN^cEpK8gHaiB8m-z0T`~OGb*OF^bD%^0O!pzjuuhs8K$VfgAq5xN6T z5=~+H$(FpeTVXX6OCVEx@kzR`=Yg%%aFQXZuI5*elpB5D5Sg}2s66eBhEp% z5Aeo~H}vEwy3$w5cb}&jpV0}Y>QggfYB8>k78T^xQ}ywATi&U(1Bu}AM%g{@P~JobLPr8WhDSP$h8!h(A;{$o=@~I;`U8uws#f4(&YMUfs?R$vfl>eZG4R%{xPPVZqBz0TH~o zh?Q2Kp}Tu-`Y0s=7|8YX)t~iwk)Po@ z;+2s*g;gOLH=IEPJuW{JR`-jv;7r}evtS9me5Tgk1xrLx_~=YsR8+;^tuT%2Buaw? zXw?$RIZOA*j92I;<2)UtGtbifiW7>I%V@zvB4rkzr8UpO7tYd)>`ry-*eSaQGTMjB zV81lFtEP2l>&*OkgG^nk2cMk{0o_cQ=jdQC{+?G4R%x}bM#0A!vxR8+V%vEJ{NY- z?hN|txq9H>4H$|+?mF%=2gsx$=n0oyU&R~hX(kM)>G*)|#Pd^#N^8#5CB2_sJBzO^ zhJBg9#w>T+AxFOR^kq!FYtPeT>7&>4IvacZyyjwG2TL}>qkg2?^Yj21oS&Sh`}~EM z@igPBlyI6k7SlaN944U@h40v5!7rG1W8uOo#80fPV4?BfYhqwyEunvc~ zI`Lirrb(-t_j%alHh0Tc%3e`0nC?7Zt8m%`l;%c@ds~&Zb;ta=j^6MN;UE z^L6*m*!oBZpL^iZ!Zv~{*X@d=@P1jqU3|0yaxIl*r0gLaZ8{ucW-Ryd8 z;a7jTK$mu^*mH+SnUGf;3Yf#te9Nf~@7&Aj9~Z(4w3S}IP*+>OMp4yYbe8p~CvEwQ zF2nOP|0+-q{Z&8gE+_LM-KqHFtWeqjS6~7kM`f~#qTn)?(-jxNeY2eIy-4rE$4!5O zGPJdJ`QIR{<39rzLmR};BQC*w+FEXSNrl*{W?1iW?!bqV`8tnO!w`v3fn;2aD3h~$X(@*xZwR3ebfw1 zhNsc|u?Zab<~Az69O}>4H12ZP*~_-k*_UGl{WslqxxTUl72ux6Q0f3zB;rxnse~gT z6!w{`=a(7go?EH&6*`>BKQhA^X2mT;6RyyQ7t4x~3SW-#Sb-$CTp~ z$FI|ArXE%tf0M!!*}9Q6&3KV6oT*EW64`bO;y^8pJe6lB^BLQ7DIl?F2|t9HS^#z( zJa5wgz^g+Ut(~by^HltGrY>W|MHjOWueego0Vte>e(pLd_hLQ!?n*resFz+PP6_B% z$}?{R?YIicINCTGjGrrqk%OyV3PswZV>0(pnvM+c*u0hV)8LhiMCI_{!@pI zR%9OtBO)f1n>BBLlLp_Y2QsOKTmz5KH8<)Wxld%VEEg{-!~C+5mfff$o`)N#@kY^L zMY-iySeu{v2xJ_56NYWp12pv}-B}Y~FYWkY*}qlIeg<}`mOnsMH$gM|#%j%*P{pFC z&T_Cw=jU)R&|5cR<(n7<`B#@vX;hz6h&>WW$p-h7h+Jiu$~#EK^i5QE?+&HM$|k^f z^Z5|S$q7p$P{v%5eB)J_SiNaPsqS}VD3WForF+CH0rh<9fmsUKOxz(}w<5W`{HI!4 zQ-%q>0jxO4eYgaab1U?4sV*oSH#LN`6}>&9B6&tV%W`}lN9#*<-{J-t(_CYt6g(|n zFn5+NiXP3;LK2LKFkoIx(I&@GIqn8V2EH1r)pKHnF;Tq6^BjhUkoEv?<0Ds6Z_l46 z%2=_6EfXWt;G__P+!r0JU_X=;r@AQNwx1GZL3d-lApFl*QL4XL7hJ~njhF?IJO@?Y 
zMVOB0a30*^DdubIV5_9_YHJ-^6Ouyj>VPuCV+9pgzPSXdfzgCCGQu!+8EJA7>WsUr zlHMP*PDGWlDwYH+l}$!v53C1*8NUNT+fuj40jGV(>Vl}QlFg&k4%HEluX{T6nWg*o zW!dJ&Azq6K7UTZ%4phNW8+H~`?Fe0xp32eIk#wq>1-Du{t)8WapMob)kG^zlDNIYA zyNQIAD)_XH*=V=NyvvNy=w`P~6DW=|3qS_^4GyExWx8uG<4v4IZsx&gk+E&YUx#sU zg{gxn-BqSXoQM9td@oklBJQs}xl_y~kcHUf#pb{D4Ry6iEtWC}rgZbcSdZ~KN6bmr zQBiN>aiO zDQ91E9v9Fvc&CK#V0>9Bdr~um!Ubu-52}00Y~3?=9f&x{y)2=gk+WeIX3&|l^)Z7w zj)1@YxCpw4s1+R6&DLXz(LA&0GF&4g{5}D}?DAIKrbl(E;@;m3fq>0<>vl%O0?@}cP;Fs3>}Ag_x6{zu_4Qn=cG>OPoc=XB3Ftab=B zH(t!c)W%Hr!C!BdZlHVS>ce0&Zkh|leiQvTSNBf06+>#A3rF0T3LS9F;1er!cUCuT z^9=f;)PjxG+bW=>E~B~%eSRA5A2mKCJx|YMRi{B0B5czZDfIBTjDlP#CtGbOAHoVo zo9Dp?w~dPCizWx1u#nE15C58|b2rV`BW-mKiF)VjoRFo{AswovwV?k#+rQw024K8L zKhM`!!TOtdkDe4dh*vhob~JEQPG|b|9vI}C-ymaw9_HEl22EIi8Y=091+Z{d(dP?5 zmZg54>BThfL7$>w3w3TfoB?>!o5qIxivGS33Sb$PF4RNQXJbSLxsRqb_vI+svJlR% zwbW@5?BvI2_#%Bu{&zTg6kkB0P8k29`xfc(euPJhdV$}cF46=0tn#vwGQj1BXk<@x z3dd6Lcn@;JIAZ(Kdb*UxkRF&dlb75U4uJiO-X(oRhBPHVLzQe{ z)Sg_a*SI|uduj{r(`6~XdHL{(VY`WjJOn-S`3KP!<+Yvx#kF-s@$j*L>>xFKh{K*bq$AazyHFqD{+TmMkFSNzvDbJgLLsP1eRHpE!b$j5QNek}K%SClM~UfZljg4|m@~ zKR*eDVm=i=2Rqr; zbBg)jH+T=#`~)Ay9W?D}eL`nPA@N0UD21&$s0D6FkB#*9(_*w_KZB1=H2E3W`0MD3 zXJF8LMUOnA$9pyzym?utXAF?ywB;g zVCFun4eu&Jo*sKv|E2Jj^spuQ*(znfzTmG{=pH3o8EtVF#dha$Mg2{Hg7Ry;LAk47 z@_qCM-LL|CHP}h3T7kKY5ncg5_!HFgIsI2BbYSjt*rT#S2a2iY6`f9{b=pg<&xv>6 zT&aubf;yebn;s{=4eP1@^E%TiTSjA^hkFuR^nZO`_wrybf6ntTWZ5zQJi7l4yX{k{ z;(5%VubXgB#JJLaQbqU{JuQ2+?Y#iZxo)xYcN}VnO+R z&wZX*f=J)@{eS-YA?$z)H9c(=@-Lk><|Lo4iWUm|c&RFc9OoDwEbAK@26KE>x;lCQN`vsw0DZv?11fnm zi_?k)ye<8-)f|ts=|@*b2lo}Ni7@{$39Xe@lx)+NUr_{A^$n8uV4IVBaTeqLsk~6(083Yq4_iaIZ#RwHDJb z19egJ7uZ<}38?{v#9hy#ce(NDXklnkM}waH7ch*4CTCnSmwA%Qo60)9>uC* z#Nr~3yPQnnarexvEVQr-(OGdi00^juLrV!c`kZTc}n(V)OhRRxLKTDmR0(g=cA{WvS5s1 z68c2Zx#%aJH|oHx&qoiCW0CYy&l4pky2X870KLD|C%nLp`mVnE1yrXS_5CkIFUo_Y z*)Z7A4Jcuu;fHKMYX)6CeFLo8m-^}r(VxNDG=14J=9gDu46pvy{xjK_h<@urfiUX1>*^1XD=Vc|^I(Ru#k#psvA zwx@j50vJ;Fq+e3u^}_RFnC@G~<2P4NUxtjp)AHqME3B6>@|i~bH*sN 
zfmvS1bPrDzDCbNMPk>dz3sZbTuQ??y z?MVww(qBQGT>GXn1LbBx08#QA60pR|_!t`%)8s>=b;`hKtqd6;wR8Mqpw!N=fs#=g z%O%4`!|Gw2flY_BC737A4xS<)+MF9RnOixQGqcI%i7C5BlcUR=Wf`@m%-LoO2QDjk zMArWjAdrKoi(^I8QVOD_7Cs0_C>-_$-JD6q!5Z4y?zb$Gji=t39hCDe3mn3;HX?q@ z;My>Mtxg17yLMgi{b|A_yG@Wh{A_55{OKe96mh|;+ohSX5s{?E*&Qe= zOLaXAS|$5&2JAi->NS6fR@$EK&i}}%yh&x7l^W2UE_gNCuNV-U2%Ibrw^N_+N;Fm^ z-3%FI-b*!Dfk-u;*3@)FcfJzMk9cF0nPA(KUch=uM(mJwy@JrcRS$hNTAOSFz{Dq! zBT=3$vJ{v469FG_t`{NLzO}%gkH^Sv0BfehlZ&{@2^4ITqU}vMy!A{-#*w zv11SA@P7*?ae$M8#`3`PCiDWwROvFfeZlfu;G?dZwH<*-` zkO}62w*l=CY$=3BUD~NfDJ8D9k;OI=ca{p85B7s4Ag(zA5J*X0G`j?0wQozA7AFCw z`QYU>=$gt4L-ko%208bXO;%GwC_#YPF1jUwX7T6ae;gA-81)2BNECAQq$G*$&8}Ve zJ~7MqDB-1aZ~%8ChzJSWOd)r!KkBW#y5dp@<<4Pvkg7HSCe^nwX8hrDWC2Js-Ev51 z>V>4Qlte7r-Fp7JjFa|h(z8ZM1&vR@DTANJBUEstS*AAWqI4ARaB!rPfqVxz1mqon z9PiWYC1!WodxwwFnP=ABw=;{YN(Sh%K4z@vOu0z(=V`LA2gu8w;jJ)+VPh$}x6Izc zyH^gY&ACQa-eek0QoZ;;)NC~MS(SdP&hLg0 zcFeT8=xXgR%(hdX^PRJb&#i1C;+rc`eq?PKZ(YzXlOj{!&D1vtjzVX8a%5v7r=Vt= zOqLP!eBGo}(O@vQiyjbfAd@bIyL`-cS4qLJy(pP7I$@LYm#7B~vq`cN|9_YcvCO5L zEkY}ko@K6Kn7kB-;Dj}SlZ5w4Q^g4w>l&(S+1qUPQ9gOoW};;ovVO~+u&ejnvJHM~ zG=si#atp8yr5#xktL`NFPqgZs-qjiLQ$Ft<{m9n4RnCpRvr(^3{pK6dnxwZS+$M@f z4?O>ywmioBzc)qq%>k9yBd;BAm_N&+44L(_lmh1b<1IwcM*Zkp(J|q_1dc5hPsLmupnBfI{&kZc(S=s|X+5GDuDd^Mx18)Py*5`a9A7d2h&(alWW4EA_&6 zqE%zl+iDi;E;V_zV9%D}nv2CmLAnL6EGo#pde=MXced*Ce?$)r-nCv&{zr5*b~2?~ zqkW54e0?qGGH z;2EhR4WaJ3@Lgj|w3G}X910eEKog)D!N_E=K@<(2y^E#&Mm^#^bRSIcaqmTc3#1=^ z553Puz5P87m_Vmd?_-vazTl+yIT83$U;ch{yaKA8`I8~L#D+H!lf-aH5akug`Mmz> z{pdk?i<*oYyYpRucYL=|ZJpkY4){??59w5TQ7#d%(WA?^Ag%$cyU`yr8)tN*t!5&2 zbw@Af#Od-6j79SwKH%)=DgEXLXfSTn)`zGr+VzkRqx;m|R1pixVNb4XXL!5j3OT^U zlK^lKgq!t6A5wCi{_}^?QN~f>L)19i*6GrZqC<-*>%%6WU|SPE!d~az^*Z?xTIU}9 z$B#hSHTuJkX!0{%@^SRQylv~)B(Zy!q5b@0REBHx1s@yGE&Dh+n*P1_F&(>67k?7H zUNr5ue}blEtA6ei+|RbQj{IlzSPS*y^iQKV1Ya-K(anz11A3yr*Nb}?FGyVxy3@E7x@V%I*}j7I~=bxY~FL-bKK4=LVu~mnA?7EC z2}4(qBVqnzk0v6Np%`-86>6^= zv`V$L13S+mJ#+_jnQ%rsqK6ciI;cQDIz(0J9~7vFTpby4ccJL9;t~DC4pzy0{mzc) 
ziTz8>Jsd~LTi+Gs73gnpX`i|N~S6;NyRwydd3hj+=k)>VnwE_^o#90 zUMvf{!0lo|7xP4zO%TDt#)iE>Wa5~-e8FKpfRG3c3{;S<8$v)uRGF|-@R3uHSb31p zlm%Z$qkW2E1qTMV1p$hj2l{Q=`rxml&RH)HheJ{u*<-h8&q^#wZ0k zBsy65;J^k6?A4eqh-o$&#eDlNQ?P~jIv(dM9UF@Ol9v9K?!+nBY?v59@>TRc(a1$0rNZU&1Eo9p6M9l6=&W)B3_VP+@){BO2S7pT2tatik9%N7=+J_6+@T6c_L6tbfE+nL7MOu9^xmk26Ih1~JdC*3%a_^>PiCou;=2MCd*!6+e#>rK42G6X#6Btemlt>;eL zela3YZbw(*F&8H2Z@E{gXMRTg5(yAk*s-d(fVL2r6vj&AzPSj1_Zu%{?CbJbqXae~ z5EYu8I5Y>u%c)K5TXcd}gqo$NdRB=_*9?V>nf+jtI4>od`et^zm!`Na;C*=rH-9t@t8cf zmnGv0AbNpWD`dm#!^uSO5-t*q$A$b(VG6-p0W=i1D78HX(MXax5>qB{Osp@!0wcM8 z&`FVEQT`KdJi!JUAHZ^Um>qLvLIQFGF``S{({dUQxju$i8WI4zI8DExa%if!z!9o0 z)L_8aP*XXX<;EG7Gk*!lA%U|PJ$KF&n;9vV(L<7$6ZCN9`t6{qNHTDr@brz<#Z)Zj z(+}>n$pSLbXP7s#a&KHJ%2$EAk%SsI=*N(y8(i&!Hi8Rn3Su=Q0$@EiYSPiA<-kX1 zNx6y013wXQtKA?hzojn@sUgWu?u>)AyDY1U&3P$`qy3o{t_mZ3oM1+$|s`Yu^AV{Dotm%Qy|HpSb9}v zaz%e>e`x41ckO{nRK^$#dlIpKA+E>8+TYfG{$vZ52qP z6>d`4A#anBfovwT48DU43Y!QGJi^-2tHz{M8T0I(yQCA{rhw3)E@@{-aB~HU$PKr0 zB=?mm|FKuT$bHc|nxJg#qB4BbvmUQ8mSTlH%XEKduf`8Y=Y*xvJ);jq>hwvq|Q+5$=)g((sTCMMmAh+0nO|m;Q_0 zq_^q9Cx&BjiOwX}=@7Ae!y%L4h%{ z`RRQM)X7Jd_}g)bY{!<*Y6%3pY>pQ&z89tNs;_t*VMZ5|0ffp-?e%t&D@h`|f+45zp^Mw+&esgC)n%NcX zZKPW=aB-42CACf@e7qbl3X*vXubwTIpk}Njkgh>qcoK0Q+!8pXFEGoIEuhFXcSxnT zO3tJTRZYCn<{PBf6xbn_B<#{zJF3Z6Nw7z^g)9Ob!sOeJ8tSyv&R*Yghmox178{D^ z!YB!*vL;N7-{FBq=~o+dqaM%WDPUN@{(61;&ruoFu?OjV2Yg zh%uo)0Luy1%-?qV`z+#o&z_DjfI(p@>{uio627ZMKU1U*&`%ese!4WQiidZ2)fxUO z*RRU$G^0w@+ewTJsky_}$A;DXx)Lg*syvx*;loqn1+y;;Lf;)x1B$i_9?OjbU|vKW zn|KU*BQ)9|0(}3d;gX`rdTlrkT`k456La>8*{@Z0-d@26nfU8sW)pOBeSFGK}4eZ!XOQ0W`N1QoOTNbZ-drIiPt& z@7h63jv}pWHSA=@4c@x-DJhdK@9p6eF%Rk|ULGhS^dU76>=Y zK2*T!lzpfG;=r?s3%27`rs9$Wau2su8e~Kiu;R7o4XTi%FwY9>GYgbTmT|p@A*EGV zVsQ1q&KXylS??nY)RVpPWXAfywU^34PyA8Q!1w2?P z`ktj`a2R%k69eE{Q#K!xzt}y+H}sGKK!hv*pl6u^{bT)O1AbDQ=s%e=&ckzK{iiRM zHp7>qrJ%HV3bdG!p~3qK#=@ELb3|ZKfxf6t4IaCsAaw#A@R-GW4MQyi6v|o=>$8`_ z$-NgQ7V)%1ALR3k;*~McR3-WeoFK4tzc0>3BDx{&5TjSnw1*qTkLC_cSV8!)h~%&G 
z^4D{@Sl?KYpMP$lmw#zt(iGr+rr;$4v>0s#FQVkE%PyK0tDH8C$uKbM03BSejAQY@ z)fG;$??$c?=y%HpZbma7xDbsG87z$TL!ZN)5omb4M8=Rx@aj!A7J2nF@RSS1sK?Vu zH1M1KlslI0H0p)sqUiv$>;Bf2odVh<107fmdtOTz273w*rFm-v;ph@y?%v8OQ6U!I-%mP(aK8iLAoX7)!gymEh8 zqW^?g!{US&7IY%vl6@YPIG_>tN>&KKlum}-!AH~WUTH79R|o1;*$E2Yc+dSqj{T)q5@M ztwSQwrKiQ9PNqZLu$JyncUFhC_v(;W!#2M|J<_2Z)1gobQ2(nARqP2VJq!}8H>e|$ zPCXqLy3ru$!?kYdp4RgldKpfkXj3>FiwyWC2IhxTlWm#R1QC_VWkb!Lbo0$m|MsLC zIx}f6q(UGgU2KsjU2c}Ha=}}=tIejX-0)`Kxu90nVWzuOT4PuGj@Ih-(i-}hTD@!t zvV1g)XC~Dn6@DMur4MpRTxvy|JKf~MI3L?KhqG6-6dxXV#Od*Yn?1$m+ungb za_>O@HV6!2>>U6;?g~Z=F7tG``7S*`uDBzGtMB|tX^Lb|im(O41M?-SF{5gXFcmP> zyY(ZK+6nj85#-tai{m(S7D0uxcP6zstdz$byC0^JPZfWI&f()MP_&1p+4k0`k`Cfb0VXz^kUaJxKF`E8RY$OA7b z2+h_8aiMj8+B}&#ctk8o|GP~8!4OoqCGKDWXj*VZGa#B981{hC)4DzW5V%yt{+Ml) zi=Ruf4-t^tsXVRH?_mOrYGF471pmyos4uNkiM_2M#QJX+HEU>Zr9j&MLn*oUXU`1d z-rC|M@BdJW1OM+RwYMcH{6*>*B5P_4dw~vwy&jzhQ(ez>MCJqx*@Rf!RoT*s1k@Kb zk0+#X$E-OZ5v>Ua0q}hKejBAedKXdrfH%JVK!fM)3gO{8nUqkH?-p@SYn~DrwV!cghoR089&YM7qg|NzIiqXLJnkr`h>XZn>0Ewx!fm} z2@%Jrnv7_N5DPLBm{OZAQvc+E$OL?06d{VocT>Znc0Fyml zid%fL)OledZL>O7imt<~4TM3Ky!eimK-Gk(4GAvHPgMFZZU@zI_5ook@+sVgUKNU5 z${|<20ZXT!xiX#kmHcd0L^5;VVJC?&{&`6MwQ?m}xR;!S4B*+r`*Ic64Z$f=bEz22 zi(Hl8D?jHFnfy?RF3HaYLaF?_iB(meTov}^s(kOMmk#0d>y2u~`L-F+efhCInM5;b z;)1YEa7RcG{v@8Hl8*uklR*(z!k)OoOU^}zEMdbg7)2h1N}y}fvu^1bPwuGlX$_vA zSqGM~%Cl%qx1mz|vWlxH)NHQ)7G)h1CG37vzPX1TJ#UBx5mYN8`h)?91%?IMjy5iC zqAKh`=_u-0f1PmW39>~-(nB8VTynighQJx$uq)0-9(9~++ z0qg@pw2hIsN9ma=zqJw@=TE;DY0eIweA0@AKo$dD+t;S%(DM z^WyX$wlL3xo&fkm35hR9H~btCcr{P&+DFBb8}iH%QBd|8b6oRUo_}1EzJ86Pw-_2{ z!sDYf8ZV*oK?!;yC?V1CXTWyg&AWRadZ7e(Xl`S|Ov3(Cj!e6EJf^n~QdLJ5K^N&k z+F$?_0HupW0f{DKcLih$1%g5<`Nth)98galtg6rA8i%Y&hFr)bgN|G`2Z7%W>tp ztUV601@$+J{emf$2(x*5wlqbeoep)e@Le(bFXa+ZHepv^wsJ5DO@gPq2RiNF~?~Ag_tIVpyDv z23!ANnEI(TQ$ILN{j6Xm=|XSynX$rJJ$Sf^T3R19T#e-MyW#4j@}?(Q2qB?grHvOKEXCwbU+ zRE4+bHEw>De*6G6L_#gRb%2^3ZIDYuOH#oTC<|ER57x8y=;UY>vu@YTqt(FDj@Q$H zBm-{y>-vlt{f6qVN2?zZP2OaDN(tN?#8k%}DK36DN}QF6+ysrDRK&_0 
ziAkJ`QGX&lIp+vjCgBMMr{)|1{Uj8{6%&Fdi5;92#)z{B+PD}75pzz7Tgi4N9U(LB zG+YNpM(L1iU&4zeB-otOzMsa1A} zWupIK|2+-_mBw|@}MvwGO_URSTYkvGK$IP3*nM0AH!oqa#I>AZuzF~I!sL-VW1fYY7;s+KNDiW9ZRvmgiQewVqtS@Vy*lv zGC%eE-&6gGVsgTG6(=^y)bVPx)%W2tCJapDfBe9pytS-Y`w{)*;VPc`Xqi=)_3_3@ zYM3>*HFkuW8^jX%;R$M<^{77lNOdx1-TFwii#L6gy4t!=pMIF?JM90%(9)v{UhNHo zgM;+wPzuz98E#m2e@`XSL(>ecP9J}qD%C$eTJ0!ZmKr15tBmdRNvbZD2u<)3{V}bW zq|Of#6X^fe?*Zv4N%z~I4Q~p}r0A{RQ-gYUwpBW7Rs1=tqhj4Qh0b1Zyo#qjn9im? z_#NJKyjmjn^jvv@nm%sF1CY!76umQODHi<_S7y2A+{cBEP^?pB4 ze>>`xmf5y_Q&qUqr&Cy)AP)J7^FzUqN1{4G+g#9=K(d0F1%jEzWIabP^VrGi)M`P> z3LML$wCa&yb7AF25k?fme^ zYB0Ds{m1G~9@Rflk6AJO;!o5i)>Ew$PF8;jViVryRMl>+X?^Wf)h}4M7Ke9+y>W~F z>NGXjo`a*g5JtjgLMrSzts{S~`dYy)3-qzSQ0s}cYM-uZgFW}@;is#i!EN{HQ%+}K z%Uf?hU0rGg=j+z8Gu6IUaNU!7{4Z5~kbtr6zf?a8$^KMzmg+ChgU?d4``k8*eQHE% z-bC0W$oxIJWUt$K$G5!4iQD4}Lgox%?Hb?c*3=yRW2 z3L&KujUp!UKvbMB-IaNJjSzGiY)6{R+^D8f=#7+9Bi)sCjy5(YZ6j?vH*p!G} z!nZs0hP=gZ>fLRG(4Dm{2VK5j`k*&e+gp zWU4py7fQSbG2%0o))OQ=Ajd|9g1URGxP|qK#iHdc!K6fV$T1~`2d+09J(%NHB73*| zG&Xb+cLCZXCVZ5?&x@A{DV5@qVR)dK%jQHnqKi#d0IBX2)eUdiyldG=Rgt($9a zDD<)s*THj&=+XnPQs>9ugN-y`Zz_u$SQ4_GyJH~U@XD6GLjUP1X#f5C^{dn^=r#oruVH^hgLsUIWhfxLcpk&>g$gnPE@j9 z4Pjlj$m4$fQoTBt$6+(nk;M%UFGlE`&;%}^&;Z|F~Eu+w~| zADO8RE4qPGg;>>ufZeLUmuo+ThFOikV<^i2#q&wmb#c4 z-kXJ}^hZ5swmO5yd$ZML7F^971dm7b$8*$m*5mrpYt)7G?!9ZET=(mJua!sZ)N9o} zmbIj{;yP6kv>w)jZ%|4-T#<@bE`HE>KEpJ7UdR@t^UNC{c8m2qa#G+?@A2Xds@8h6 z^@AH27Hy5YQT>2g8*XF(L%nh1N#R34T&caZ?Ra>3zwr42Dw}Rn7erS+ggNWurD87Q z$0O;;4ELuus}n~q@n8WSn@~_xA(Dh}+Di$BAeV&V?8$OmQURsSrQX+XR?fl1F7V=@ z7_l|S9UyH(@Q`zt(xf0NsB2`;mhAWDhxGo9>d=9vU!CR1P!j?_lD0)yhMo*W=_vo)4P_9^>;Q+H({kG2&*WfEyoaz^o&E8DEE_?`Y zs*Q_PX>k2D`mM$AR2#3+6)kFD{BSv+_|~8WcZtYAT$bhfr!DNK*OazSZ(%nV@Gf{z z-4NQrAO}384(!u7n?4_qX;E&J^;#SdT0i@`)&&o#HCAx>v-+n?)R^GLXZ5@#tl`7; zUzez##oNss=KB=z)e`jkoOg)`H_Q#96Uc4-;lt{kVC` z3^PU4CgG(>hyUbCyWjl@e&mn3=jpOTY2>0O5j|(=HLdC(9y?k=_CM%-+SEoK^B+^g z{?~)KAv2E8ABWc6s!Lxgn~)htPVaGC@q`*2XB=Eu?2Uu_Fk~G0G7cQSym9EIFO~J7 
zs@I?RFMXfy^?knns!a_Yl|2zz(!R`&2$mAlGEWRMO&qaYtcP{kb7g%B@?`Dh+7IZ9 zmYXS>w_LpnvwZdnH4cx|<`wF|g1Kzm+_P>iXjKwM^1Fd~lh>h}^j}vZSH7=jty0%U zyUW0|0PYv|2mBJ9V?=1MklPRSpw;ZNALtWSt6%b1wi-#~L*27l?c+)@FEg7ho3;HB zNA5W|NpsgpmbE7o%OstqB>N++KYNO4rqGn(d<96kgH|<77=P%dM zHSA6=>x0)o5TDnNtWjqbykHbr#X;>pt*V5JspqUgl9j;en~(;iuIh z)+hRdr`4b!uVv4uLFyA3G-BUWpZ7L5&M;uq}I zdL@_YUbr4m-lQK~uLkzplsXQ}GaPHc-SPrAi1eZ8`|H)n>MzRTW=N1g=$YldhJ6BZ z_y#@VSr*7O`t)a&D%t!NMXKl;{M{Ije9g??AAef`$UDXw> zk>^qV7CoA4iKY~(x)bbi4V?a3L^C6- z^QaQ`!RWeKzaDAgR8>gRNd1cqh(C8N*Y|8dmVID3Lmp(GuQ$Dj0_pub8tkEgz}Ns9 zWw+}N{q>8g2B%sfYrp1!+tC?R(WQUBK^=+u@5&d|82HdXzo-t`Q_)Ef+KC%kzK`nP zY*atw(Y8_D9$a3g8>^jzA+k4Z0`_0()tl6~VCVgM`zB`fBR#$ok@O>dQ75{*d-VgI zRB*3e->Hrqa-WDiIC>+KhUGlrC8hbF)yhj^X|9-HOkF; z-b?CV*8J8vFN2-IHSg*5e?hD8!F&4azo=&;ca0C0CW8{UucwSyU20qsA1j%zB5AI` zehJ23TslNS?Jfn}a0l#&#%WXZ#PAA|F+xV+`THLnIEDfomLsUd!iLO^lii;)h$PBB zvWR?!+S)bB8T`)J=ph{2MRY@gizIzCDHEkrA^KO5=J?GPNu`-B>G^)eyCU|KBpcCn8XRi!a!307DjoBy)f$J zOQ?sEQ>h#}eI<{bmb()RdPZK$Aw1|O6sL9?TSWMhX`*>Ymz)wQrBA}cqDw~3n@y-i zhGIfHr6f?!G)UzfPsBTsasdeZ2k0G38fe~NL_{uskaJE5S4wyt9%vE?v(QzzI!R$V z3)6&ugYZeb=N}wcD(@m5TjPT*@;m3@v7u)9i2lR?O!O2Vn9gmrvh_$Dauy~9yj^HE zx}`*q>0618k&_nIP#;eNgPW4I(z-VNR4b#7k2vXo2{V65cl(w{p`cj_bfw8Z%v! 
zTT0&dyVe!d;ms`LA^HcK)jr>=#?T=ct2Ug0gk0c^Lx$#e8W`}))Ke(o>V@RkFUbxl zgEZ~Q2{0&pbCI_0u`5HOihE--r#^ztJ~o$P_k)o*J4+g24F(gG6;R)Pf{ZPpM2^8I zqKM>(6Bxj~FC-_~m;;64Ic^}1&!?DZ#KhziT>~8~&FWyxC<=P@u|3sC^3i|4o9&@0 z0$gB#B>m$!mvQhBFO|~QRid`Ube@xYixaCPtrg(*MuR2sQhogEYCvfRl}fMifTr7> zI{CV)IMYVJU6sb{nMG1=ODIukhFdAaEfG~LUd$BStlxTF z)g~(puiv)=0DWYgdO0ogg)R3!x*mA#I}iVQ5E~3#Q^@I;bC2) zUXp`yxB)t2L7Q>glv6zVE!p*zkU+mXrCIvDL7w!xLkh@o zztA#^Xd2S#4ydx%=`DKQ7B%s-W`VziP4GKoC{-Jw5t?KY1pJJFzf@uWMIf=17t3z0 zFq;Td+E#|6FpFDu3 z%>I$IX6I(?>^wY_()FY=_qjn9O|10$679*yMEpl(&Y0_kz(sb3CV>w1Dk&%kWum zEj}3TgPTteH4&D<3c$A8!t1b@@-xF_fQ|WMlc65jK)kgk5i*cRWvy}BB1SmO7-n9f zb^{O=6?d_-;mRA&%>tUuWmDWrdpf@`8su-M?#8dtq*SKVHS{|(6#G>#njSt@-yY9aDK|1xPUt1W-%}aHW!NUh zw%{AeSeY_~*r|4uVQ~wvzzrfDiP{VYNcOE19S(>EC4-jPHw)~Da}AeedwkPMq86G3 zMdUh2R8H=aO4Y0x69{<#| z1=+BYtiY-u4XXkJE9qIg3?&;@rosX{nTAuUmkd|(8pa)LtT&wLfx%ZeJ8v|j-CV$? z3UF{5%&xJC_afxARE152solh!BvRCEFBC zyuvU21FWT{RtB1ah7Zg_sFz%@m_Z!!$kw56eor|??NSYXkK80D#62lqb5er=O`#C> z{BIHVo}z3Hi6atV+g*gd4<1Rvw6Hl&F^4bG>FreLGvqRjO|UaC@Y^^&ixP`AvgZEEzU8R%+x@xq+K4bE$N-Ell_-1BPF;;~tX&1MX&k!Q0xh_qVP{ z!ZuJP9BPhPNSKd`M^Ooz2W3!Nz?om_?u4>o1_C!;L0j(C7>r)o4HqijJAxP9gyXrB zwYUv#9{`ugDwEmn#^|_5R$M)fHL~^MzhmQ~3J_)&?^civtIVy#4gFGc>Tj4;!wc0H z8((D^K5}q@;aoXq*Uks3dVe-A;T;;FUlSgB^`TcWWU3dkDtqPF;9MbVvUOm7c`lB- zdFR=S^f@2ECpFO_v!59_3e+1-iqBQaJP5UcoeDKz^)C=eUhIm^TiLmnidz6C#Iq+hlIbpW>NMjO#wEWyiy_Kkwu zjkw|zdxBi(oLI#)zDPtXLJw7?FmZlKqKw)(8nqW1wgxY!I*4Rhif<8DB&G`VVrT|C z8%q*>4KBoF0Wu2^I!&!K)n4TzBygoK)tinKwX-|Op$7~l$r%9*5bX;%k1LPfg!;yB}SGpli+rh7^)Sr z@2N=Sj7t&kZxv6A@=^h_06RBD&dq=)2@U$xPta91=y{*0%HMu0A~|7ky5(tBbf^ol zo#PJHBP$OO+Ol^UB$r4S8qMc6$;_M*6r|97hw}oWiWIq{Wd^cech~S?Rp2%(+DPVu1#u)L>tcH46(+Jya78{>h z8PapZ^VRFvr+A4&>4Lpjb2(r!thuqL=*`f=Itvklmn7oxbVv$R3C^>h$9bM2^DI21 zVUj(zQ<&s_8Mc#4Ake?Em-$VG^y*Jl@sz!>p;>KS`vEpoL+$fnVhsZbY8ZwP;HO0b zpM-u(AjlRY^wOKlBl^cZM4ynDJZw@P9ok)oB*Sa8(Eti;B~T7NIQ(jsU-pw>)>H_W ztXg}XLG>jBu?GUaw$iMMrC#lR={;z|U?|}_<#13OXP{d46oELc$b)z|?5I))nkZ0T 
zVzB!M#t?YJwYn1iX0HS2U;vx_icI%6XLVm+^qGqOf&|l!{f3b4F8MByrO=RIrTvIu zHMaH|o#|}1*R)?`52KU8b=&&m&(wf^Z7^PG9;P6zP;4vILqAu8_YuW~SwpB5P?@t5 z(GpLw??tfZs(0iS5Jy;oSh=45xvCkwkkcViP%y0!X}*sMMnxKPhn+XY9d<5LL)%D= zXAX)~>P?@kNW8HuML-EMDg*X+SLkg>)ZyiN=jUobEL+B<3Xy7i$$a{Ve<5>cq$CJ} zEC~@;#@N$Yn?Xdc}|(ur7bc;JV6pRch>U!ouST6F}kjkKPAZoeu^ zU$;xm3%U%T1-8W?u}_NMtY z0Ir{$?Jd_xj{Xw(H{^cx%`ACCYbtNMyf^f|dAz;+(oFe=w2;r+qZ{hwtxloMDUNuj7Z#5D=IOP7*d+Uc&CU5U}Z`EO!I9%>a zFVB_+`-WZIxZJPin72OOAU=J@)LG@d&73_)a#j-LM@G2rDU+4pqteqg51LUBHB>5n z&+94ifJ%7l`N(vXsK}D@-JK?tz0s5- zT5@rcj~~BkUfDyjd~Kg7-$OBYAt>dn1BbVcZS5t$#NKE$`#%Z{$tZjfwmV8N!<~TpJ+4 zVsVHO+5_8BfH>|oJnY6Qz38UuXmD1!e(I*`c+OvQf%r&dH;Ut(Yud)!Pxt|*YOaOovbgnyv~>X+;{#aU-Tx+X~&YIG3ba{ z%!@&1h}eQnFOTc*)i{;8@MDDbsXzjU7zdJ@>rPb)WIaK z|VMWfuvis%yT#m?vZlhZNKz~@|92LaFVPx2;c2ZWzQY&XSEr8W$!2)jcZThUR zGp_KRGC6h6cWZR#<>h_#>abJP=N~4`Sn>GSPN!ege-AtL%;xD4Gn)$|W;W~g16s56 zt_TJIx9Nk6oqhK-eJ}0jyv*nCl{i0V=AOK$d>_4Eu2aFxy;s5vUDKLZ>f{EQut*s* zb#1|XeOIod>KY0-Y$FW60W`N?<)R8P(fBcMX(+fSYdWj+U1d%mZ%&Dz)q76cQ*)YI z?hFafk+sESkxhTDoCSJgxzn$Fpr}ijQ*U)M-kbGT<<98nGKm%{tfN4%GaG^(x9QI+ z`f;6hg)^|1=~+L$s=~RCN&RJ|Bc^9fmCj*UNVdLK>0E0C*YD70^l?VzZQH?0Dbl4Y zs)q;bck1ygs&CRK^>v2t(Z&KNgPW_JLwcE5jMBsUIs047_38Z_F|lp$XXfSWe$KvG ztE`savcfR3y1Qo2lf&BT>NBsKU&H@zn9B!fw(}I{$--I23b~{VMxS=8>%I zH#c}^xh}5iHyV(g76V1U(hFj=xo5KRb~f3_y|%!9r_v10CmlHr1@=Gm3vs9a-f73y z6rN?%j(5|vLxUw}^oKk>usGS@v5K3^gD8H4;j-L;dhP&_Wu<<4fOA6W?s!%=&^dsC z{Ai%_u(e2AgB(Rwb%UJWSe>nN204)+X5qICaR%^c9pbcN?tI};=UD5`*5yN;C05zu z3J-^IHh|oP>=u3LzRuv{N92SNq7Lzgay+!Jvk$joziJ)}hB<%5r0um~&f9xk@ptca z#fR@ESWdoJTMP*3r8&-zz61PgvcV5#-Ifh}==QzYNc0_a`{_Nw{+TrF&(lZj&${R} z0oa_+J;1Ecrw?!*0k9X1X47~;-#FSCfmPwE(auEs&T=7-ArF2Q=8NSAIwSdbx9&* z=@!a{n;Xn~j(sg58YO-RIG8BCMV`86j5Bl?w2)oOUuWCB5@O9Eln_;w9V@f{QJE-j zmZzRL7Rqp)zHF>B>=4=Kj5xq%$5uBI_FnFPVY5pV*;@%?Y&NZ-p%|xGx$Y42LGYxA zz3{EEPF0=cLF>#xu=KJ(GKdf=aW3%BQ74lg&OFo^!PmEjmV0uEyRC|Bce;em)WRtiD$HGMZ@0{f>^ju>0lwfP4|B%jZFS*c&hL1pXqIQ}{Iz zzWhY;U?nbsuXs7YQgBpThW#P_8-Yn?i^`oy{Nx8crvuyzMEuUYTfFDy9$Rbow7zi! 
zn>17;=-{`~$(=g7)G zl>;haH;}S&FO_2eh{whwo%PxE8^c6OOZ^WW<(wQ|R-U1Bk%`RMNw3NB80R-dv2#)G7sf3D(_VC}qguaztaEPAI$1A2&N;TWGM7wHr1 z{Np0qYJ*|lD?U&Ed9w3s>kK{hbf>CfvuHdv=K&y(sSaUgdBllM-=rAs$qES>E6|`A zRImwL7DvdykQb87m{6B$P+sTtnQFY zFd*=sozj6$IuJrA2pJ>VkXz54vi9Re?u^}J2ajV z4~XX`~G`dcSU>y-7Mdfeb_rJ5oTbH**{l zymIR~^|-}@Vv^fTNofeB`^jmxm(^S(YI-SU#7NH@NxDkx@0jOIUBPBaXijGo8=|1O zWmv{&Z;_KiL0)E*gn;Ra)uQh_$w?&Nl1&OuY@ZKhkm%z#W{HON(_ zZXUWk>I$(yv8M)okbunkFqq_3*)CPKo1RL27?xnk;o+`4tsR~=xx9K~_@F?7sy zuY<*Q(&D3bmq{6BtOhWm zbMv!s*W3;uR#I_HQsjXzKo$yjOFP|^fdYuuHhFX+0^0)p@ayi83`7dz##=IkO;Hei zHp|82kN{)b$zyEe*cV7QhIrr&aG4=!OO7-`nr4WqB<6SI6yrC4`Af58hUndw3;mWX z*xvb#@Pf@9I(-N)mU-rDx=wHFkpMy97#1#%b2#B!1&L`pV9S#SHXdgE^;&Q((hD)Z zla(T#W8RUOzH*9FRI(0$RcGexrk^@}bKb0o8mOgU*(Rxk5=jtDy>Ll_z7*twF-)lf_i%5E^H#%V`MjCWV1f}X1e#rvD6z3TfD0ckz|mc&I_iLK z`PfYfe}cSvdL*^S>u?59w@d2wTs`t3lX`%1UAVc*`R#9Ym`Ko(;WOV`6W@g!1`!bjwZ>GY@(d0F~2tmn&oRV9|Z(5 zPsZ*~61}*0W^c#RK?dqA@>qsZpu0;Y>r%=CVX=rXgKU$;wscR~FO%(()K2gG{n$kE zIU>maJ}ZUwdN5#_8a$wOz~8v|M@qN7n!WUPNpB}Thji4!DM;X(K%=CCHQr=;`SDd? z{7d-&?vi~-@JbjuF;k*t!nG3$X{R-5+C<)V?YiPP6ZOv|aVaIbFy_g( zXYF2{$-&{~EDKEb0mqH=Iqo;RxEnDT2wM8>NKHjjz$Id`M|NndXU{c(v2=~v{H)L? 
zGK#;2hbR#49%IVo*CgC5yFL&JUus{E>H@I?V!;F2FTAcdmLww&GZ^CzE1^+v%`kMI z9KCUy27;o^OHE*d@DvU3P5@4{qy(bO`r0U88~JKT1z!nYMJBlL(9zR8nrPl` zfJ}=Rnf6GR_Ik-0Pd}?io$2hK{BB{Qt0L83DG!G9k}6i0r@!t|ya@Q>h2S0&94qA( zEqMwa81X`0a>pby;>F|U>@hQ%Ds*^!t&LZ`^oR3-Xy3nx%!5sAF1yx!#+w zBdxX2+r`i8Z+_+MJCHcQ{z@)OH<*@u{`yY6F`v;Fb(dFF?33G{ z(O7YF$|r%DgIVONU3!Sj;||O2y$}jm3?u$PnLEkkkG6wx37;XF9Hbv`!OciNFY5pN z+WFz+E|Wcr7`!C+OBZIaBDx1rCrz@V(pDCNy!o|W_#3D8FK`6u*%8+uJ^hF_GFd~vVmkzQ*S(-T+ z!Kj$ZTclee89lAfoZ<{D?IjFt$k)q1Dkn--zJA?*?)a$O)nES9DN8Puk|O-9lV~Ja z%NV&Vjf#>-M4?ZggBWLZy^z_$l1XOEP@f_gX%V7R1$~)liu>Ab`v4~$ZryQWUFf2j zzC_M9ve~Q-X|RI^na_@Mo%py;Ne~h0*4uk;K@kzCih4^EX@P3xBRYPbQ$4tsFqPSE zSZry@6OiWWGtP7RguA6F5w1Y48Rt3u%6cS08`T(ct5E>F3UK22>cC zirpZ+OWt$Wg7cl~n21xE{k#a6WN(aryi1Qe-%;VTIJNOBeFnwsMq%6$rSg1HN;jPE z#Kts9sz^_d-_;v0nWf#t$+RrR=>JZQr7co6B}c6;*F!%oA2@iaB#Oke;CVVD@;E?YtyNH@3Cwcg|0O`j?kEBZE&x^i%g%)s=1{a)HsI#yBXppVOb* zTQ!v9(2p;3oPxRcN%&CvR$YEM=2UC-ipy#5ANuXfor)OuF8e&HaWycz=qXf1a8~!t z00+yJ)3GwRRUb6nnUuFxVg_v1Qx{j&dcU{m#nYWX2A?h0H$Pc*AmJSATd|Sib^C`#+A-sNnO}x^_S(}2{nE6h+g#V^Xq0-(K(hipSUWPLjt}GdNFOpxmKf{_^4ZU}(Fldg9z3NCs{uehG+{kPtAy>p5+Q=f8!Gpyq88yDCk zETiz@x?0RU-`uG64bJry^_Iki!yp@H9{FXv2|aqQGuZlzK5ecu4F31Jxz4ycd^3vR@qXM?`+M&~dpc;QCp_`I86rKfjmb(1qD z_{gjJ$2U3m)3+TrIS-UJuF482->?d22fX|0vlF+kY<=ry&c_c}nw{E`ei5O;xa9$H zw>_PVTB>bT>%Q|GE6Ak?!*6vuN}IDkHm%Y*^Dzm!NuMy^d4>qs<+nM@tk?D0+nggw zELh;orreDSu!EVe57o{LvaHtN-+cXsc5VtjvrV7d%h0fHz3$T$9tAwIhWESQg;aL-kurHafI~F=Wwf|j_%U8_yiuja=ar&q`G0dE= zC*A2>sJiB8TP_ZX3-h)`UYK2^V>YxoTL0xvX6iP5=3N*#HR-u`nNdA>mvbp^vAcow ze0}iU&NYl>&E3u=yp3sgUW_-cUSO(8gs|U$1a1!;S8I52YR$RC?OLq|EOK6uNQa;C zjIeT6J%T~Qa(&SQwS9EgqMDo#D5N9*tV-xHkFwA<>C+x%MefwgABDnnE?DXeOD@Vy zg_uoQ;d7Q5iRXLTR-u=vA1C%B9>2u zCcr@^=4Mj5;*&5FyDOH}2Sm{!;xU4RY64x0Q7#rF4{(8or#A|a0J=)FFZwTzU|0oX z+%PsIt&f}H&e=(q>oQ%2*yIa@@_9t1OcGT(49zwxQzhRxOX1zZwJa(xgjo=mKhOQ} zJ3q$?InkyAh#lVr5D&T`i32EBRsaa1ZQ%27?LOi3Ny-HB1s%1w!@7grdlzZjZmvpg zxuPT59IzkA3e#BWezF64x2P(-pJj)@h)b2GI?oXIq>I5Ba0}Z@^jxo>>Hy?f2pV@}y>C^t~3@+$- 
zk#6PL>+}tOcE%9raNoxP>%02I$DK;O^UqFoL8{-`AST0<$DE;+z|Pyuv&QFDYf9ze zJp0$joLG8#gr??gRNriG%Fsb3pPNNwb84c*loOkdV;4 zArkU;Y$!$r=vPI}id0eUS2va=E*@2a@L;Ma80>!+#O0(Fd=pbdEYJ8Ob5k3B)AFp| zSckd)kGuDdkFxmQhIj9@O(P3QfF!VUC!vHM1f-Y6hF=Say#gvIz52BvCMaE$fWQO{ z5>ylw5CmO%QA+3th=2itARt9iQ0ctaH8Z=}K=Ajx&+~lVKVJNp*?aEmKGV*enK^Uj z4Bo+*5F?Q8C6=oVpHOkK*XhKHt=!+lnI$*!p>4E*MHMZAS$;hy)}AW9JVSmE6`NS0B3dcNfhWlh=g{?m!gng=Jye5Gmm35~&7XK_|2 zO?fI25*3jls98ut*=I&_^?Eqp0SSt&VzeZ*p?m`$nWbhBD@nIpotIo8^d>okbz+|- znDCF38<$`Xu}dCX;%RNoyeu=8V&Fd~pIz#CB5C-Kkogr#%6#+#H0TdY!S&OBlu^sn zd~n_}7^>zUo@3D;c|N2F!ma?F#i%2mrTasNhMTMp5Y$m;IRC2jpx;*9aA7pYLhp$BY1d4g1-y$ z=A?C=rq%>wq3M73gX=Mb8PK6V`LkQ34hWd)sJe$YIiaNHa}HaBi-X=?E;w0X3OyfumT=0*B0Om%C9nggXb0J7MZuf^B#iC zjh=g)m*ndkJ!8N<5u32m`cU4n3A6c+WWPvdPoGEW|Y(UWeN;io>&=GGnu+ zV?-JY={_lkZuT^Dt;2c`Pw&g`HhXr5UUhhf%4u6Xjj?GhcW?3h2Wh8n_1qSLi7Qz7 zvW(s2sZr4ZSL65!ReC-QRub>~yMuIZ@>G{Qw|e#==X19qBfrWc+dyQCthe3s27L)!hyqIuonEZaLN60U}_Y5|}4CV(;xFoq{ zhbJRT4?HR+AK9wsgu_YnIohHjk2zLq0?nly?mT}+V-(aTpPk>EKwEIw8 zY8XFB@lHgiHgnlZ-m!Qv)1^@FF#p3WxpxQJ3U#y7ljKqLiBt=$;F`fzDKR*?@S(hY zCo140*<&Y;Nqi_{cX`qiN5n&j8wCQS6XyMJbt~`Kg<~rt<&|BYmDZc_z7;=s4mh2| zL!85XDSJGzmi1$->>n4=Ff62(drz$N-X7AxniDP$oP}a$PWa?=u1tAipXWAQ zx-4h(53yzYpFFMYo$-)*YGer<20pNsifQkU2;B4J+@CzQbCulo6B=x%yuZ*>%{q5Q z{T1h9BP7jIC57uCsQ1)(%v+8oyMp3rX_3o0O)m7@E3V481D^8MBwTxN zz*Ezndp#in1GXMm1ouDSNwOAPm17ThDpx9|%ei7OUEfu?_JF5O@+s=kLxM8K=>&~9 zw#%4HMzE%ut!FX&yB49)imGSb>mmh_pNJbtg zKNf*(t2N7|46BUxD_kU79`<}-jomKyANJI#kDETQX2q5TSFxCgoPqFF)=lAlgLO1X zIIY>et8n`b4z)Es;&~~4Wq9|Cc5^JVR{>5`^7Iym7k}jVi z$s?rVY?+B)#OGDoa8M_J-$)Xsmbvj%44ero_O+YIQ8C;*7p@L5D0mNtQB3mQO60hn z;}E>@&IUPL1s;Zp1Ikbs)+rSq)UbAirD7C^6TC3k?E|nm41V4aza|VGLlM6!EES2t zjc6EFM|gic497MRo(iL7D|aecFid1|`(L=#Mh)(8(deEc>mKz~sDTX813U7;v#+zm zXbg-2N?pzfTTJzgMUppP_BrZFx%GTdaY;G`#Bn`r^qkk-Gm#v`c3ANx@w&{A+;Zhn z=!@FRAC7uNauFQHS)3EP+#jG>>Nw6tkZZkY%`(Rz50=Y%$Dr~oBYPb4RL;uAWQ6PL z^AQ%E%!QJ};glRMQCa)hip*c(b{P3RoWA3N;*TOenzhJQ+hquCX9$m11)SyV!T0DT 
zs`U!+8%uEdkW;NUDiW=i#U&6LCzF#DxH4B5eC5ql{R6Fy6l2hoDqDEq0)dq|&((vA z1pH($i-{of~cr8gFQ{vrr~D2w@(m)_j^iwPgaFL&B$2iW6@TY)kgnE zF3WS#8k8#8Fz;3ey@G~&i;w_znd!6+U*$x3E8Z20h6PJTl9$1fkrzx(VgPB|gZlV@o6+%b^?;3a|fS-wnYEfT1I0Si5+YEy98*EpFFD!9)u+u`Ei9+eJ~SC}<69 zvpjjq)3Dq*(54pPOcdD$|Hpx`J$K(Vo0mCb9-A6Lb#BSVLXC1nL#E(S5bR8Wp#&oO zwsJ9u;hX&(K9=O_)1Jmzr^9p^enrT#j)PfsxOWSNlCscX2F8(u#?~;7gz?ztWmvYJ z!_nOF4u6(w<`|2=N*=(r0bj$e+$4?nI4X)`kFbY8mbA$>f2q+7wiTs_i&f!iB<8To zv8%;$y9oV!5u6dB5Aa}@YDx5Al@=)7C~=Ml-5Q>$xD11zJoFnD(sSk2-*9Mtwru>n zCp|)sU`u3|-*JBLj3StWo$F9sw(8D-Jf$xp>bD^ugJ0^zUGAUIg)l6YNDZq%yoNN5 zWmBKW#+T)dW4ku8@*?y|Lbs-R^yCmHA*R6-5%me8XHxw2X96y-M&LDk5=RuP%yfxx z)wj$z109_zQ3`}1oNvk(&R}^TDC3W3ux*+z?LRQ)O^{Fif%)wO`N1EU(8kGaf8f}5 zzD)lUXQF4y2mgfrVW!Of(^IDbyr~|cOvOB>h+!gpSs~;YGnPXIj;^SY9!F((TQ|}@ z1#eHD#HiyCeiYQ+GxE1TJ;@QfRR=pSteE5fShWx;=Pgw^xFz`fXKERF_Ak%NIy==-aefJNH-2CI8yb%?vhqKegcix6|DX~-k#qm?T!mkUIp;Cq zm?F2H_dH{L8Y%nj!aa1iUGOBkwqu@%A#IZEdciXhmtS1GfL1yuZ@Y+=$ddyvVo`if zuDpoNmbXM^*@80wR7jxOu2krz0U-rCiit>!2cHYsm_ltkbACrGGxO6Xp?EO(*Q8$ zG&y;Ir-ij^rrb5a^DwhpYanLv(`Dy@o@4}r2cpV`%e;ZGMHntu5A>9eS7gx=@UR7@ zi^39rfjlt~I<$v9yd-+ zNsJyV=eG)}D|=t_^unk6uHkt1$w?JNTl{@Ov~!-H^pU8BzZ*qMRF2CjDo5{OrDJBm z+e$K>qBY|CT_2z86je2lK3f1RxIW^%Q>>R;!^DG);G7{EvLVEcAM~#gf<2%#_=>~; zV{=E6@o>3mBdRP)j3_rQ;(a7s z3H9XAmqiO{M~U~WWAU=+9pSEXB=M4SoIapma?}}4x~E237=yea za&D~1ggbzPvBFbM`QT9(E>f5gCgaM8`yzJYW**!(A>SP@(o%knhd~}RnWz;tB5p@1 zaNZEMQMod|jQ9^TbgGO<1<^8b;+FVN{AU>8c@O&1Xz#eQ@{I{1jX8ZePUUoJoXD|O zL%-%0m91~5$a~zP3HE7*xxoU*Wor;&2CW}X)hD%8hsab0!@)Pdv1^`x+XseI8umx3) zopjlVy-DvGJmA$VEUu~D9)=}NsCQX}+9y;oV^z$WfWLXgVEGY^L*POjzv1o`pPo0r zXGhTb@0@zds$ngJ5Aw@+P!7eh>!K2NxK|=KbsQ7vt{Vd;PF3xe zZruSUij4}8T16CQ>t1sZgLTIvni#f(&*NFTcn{YmNpGJPGpfn9`_?vc_JH!oF`1z#TmlRDA;*FEGRSCk@lTP z9D?N|q&^CJ4&H1m192@Vyo{^VVdCni1rO#L`w6hQ#BxoGS|wIqEiXKAa83@kP-{qt zkbF7xEwI{b*`|U>yrT$yhP}Ddp$$UD@kxGM$->c9wHJ4GX~c0LPMStyQ84C^`{egi zMWqDrGfi;)T&b8(I^P$!+;$kBFvGCPCG#($Ed`WPfi^%Zo@X&k?XVDMR3XkIz2BK$ 
zq-_5_qy(HCRTQalIb2a1K8K)^<131krhCxXTxxiiQgf#B!FmG2MM08<+Hj0rL=09l zC8e4eF4CT&Q*UKih#f~xtDt>86O-n^ac|Wxj^lD)`1FRg3~(XZ)rt~+$3YFVGXIP0 zh!zg+#L*BNg9pC$PK=eE$BTqV(SP9L*I&wP{PY@GiK{amDX_KYTXd!tGP^;(iH9UAzj&4m7tvly40$#5DCh7qZr?N0h5yG;lxNz|0>`?s!EHMk$ zq%33N=(3Aw)6U$3-XeVFA8C3g3%hOsQz<|y1+*a#^LNPpu|ZkePRw>-R?DTyqC?gZ z5@yS$@xTlp^y$7L6_3@OUpW6rDH~BtGaht`RL1w926hKdZ6Kbkl%Ih2`3XI|Mf}cE z+=SgLkgs9E8sn5nsXXT7$MM^HTo=ZO2$`QE(z1rZ4@n?z=Zo`}t>T07cKEsx02_kd zZ1|li@*HZZic!R^-1d?akw#5W-|L)@d}+aHNK$;a6!SZpYa5+u6k zV4An%BT=36v3G4zOIG_>h^oaS6gs@A!FlWl@ZIcP#4|e~XvH2Mi_8Qy^TQPpn;|3E z1W54{=}i^!O^@>fx&%6{ph0c@l{P8U^T-s9yb)C`8g zT6I^t7?OO#MNNUS+vp4$+!k{1TsPkLtRgB`dJo%K(3C@ernY)uJyHW^PFS=!-G9iB ztBAK!{wV9;zhzNj9q-02)3s|6{oQ}~+Ex|ePOR7Ot|mUfw#V^mVmUS^KdCNOICFem zGcYgJn;~g(Q4P`4x=>d3{@GJS-d$5XW$kpy`Mt}=%Oy2M?FvgENMJAl&65+NA_TVI zEO8Urc}m&DOjrQIk|Yh&UK*H$y7%jyM9V!to{fuThho=ucn`Oi?xnJAE%8(aP!XU; z6d>X5&#_ow_Cbcrp&c$fhPjWptIEw63`_!bAt>PF1}o=HGR5uaY^n#obZG*^`HBi$;8A29aHvR!98D zy;xxeN{;dZ$tIr%~7Z!$F{c_G#h{p*RR;N)s+J@KKl zK=#iRKVY+QcID(MSaEz`Ik^_>jjmLN4fhmTwz;SfGm6HY(2IHNV9$PkeK7>BwaSzH z985n}-XiLy{`?iV@!M};q=jpZTo??G(<-(IBdf(zOzuCWot|7#jupvm@Z}bf+yEKO zYalwnXykMQF$~G=wUcw8`stCHd<)zsPfJa{9r+vF2x}oMGIAS|*I>@Y+%sTj>8P-=TWv%L}4t{TjX^X0ZH0yALoK`*gb>u%eBe^q5J~$(} zUW6&RoS%U-|CHlvBhA-Z2rppET8RIke0^&q-!AWIDcYcrV_OOqvT`fILUw2+Sjek2 zlT%#gY+5%t{zdVqzo}!SiA}2H}`Jk{0+i(9*|4CBBkL)j+~*~%QQx*mUrfU@W`gUFu$|B z5j>odg57Slz(W63`4AgLg*HRESb$AI(3e=C;_zY565s$`>vPK|-yQFWe^jSOv#@KLY8*itdlw z#p`IJK6i-cu|C~-hqwc))ucNiuzr>Qxl?=^IjsOzh1^t^r#6X7kvrDnQB{X~vk^Tgr1w~21$nTYhy@p(ZYSPxuJR4OTm0d? 
zWeFzn)M%UK7qlNZL*xCbWGqZZdHWaNESN4oxCcsu+0*5=d$7QrGhP0AkGRuXI$hSk z7n{lRrptTpMg08fvfI6)Wu>{-Q^NKgHkd)_%jt6Ez2Zr0(R5kqK2TgdUEX(}Xls2l zU5>vGgQIV{T*Bvt)8*Csu-gB6x=g!YY=mw5>HEcfG#VV=9_!*4UkR;jjoT^vw-+80 z+0dG}Q?4(5E-$aN$I5@kPFbk~B-7t=Wd~6`VarbJx?A1?24%eq5x75)7dwb2!*}dL zR745eM5}O@{OkeI%r0qK7+p?j*1&sF-s2D?xQ05UBg%AOm+agTLdKs-rXT({$vUOo zL442xWT14^H*aIl)L0-__2%Vk3*=nOa%#63wD)yYaX{IEk>d{3+YXuYpr{ZA<2Foz zWOxYqdRVl8kQn?h1b4Pv@UZ9(uZ@)- zfhHxj$P7q5p2*B_4x5yxpr1dMBpnSt!|s7Q%IuA{L-;e^rFmye2uuKHewKFV_PQBfOb z4=O$;osZpDAUGH;%+ohLT6DYdY^-KZC=RB6Y}29qDmy%9vWvCSmD*qq9JUJ=5`iMTvJ&b z+@>Xe>ntk3)-b$_c=!Kwe3I{1Pksa1-J1?hzM#SB>!87D_`i)y9DGx1B|lTjh_q9W zNIA7Ju>aaxR4y?hK^cjmFjHps5O0Bd7WTkUKT(GD6n7!GucxSzf=Q93%zle|W|DW` zs9|onULV_2dx@2!olZ2l1F-r9nLL2Umr-UC}^I^Wl&?w)SE;GrAU+W*|B*8WGUwRx3nh#jNU z+HQ2Mz47qZTNd)3dUd{v#j9{=yS)R0)!cw`VhA<&)PccjZtPO=>r&O+#|A>U?9*y) z>~jx-82VK986;*v8o6JBR{Bxd<|R?XqkIlRs;t3Kj9IGkU5fcF=g5}(pd9&E6^pLIC;wCD9(ccdh>`60HgGvfkRnB-!`B0tT8~^`qrv_R&N;g;!x< z^zR*H0!3fD4jgJ$qqVKC%)m);>>YEf7=~aW(W`1&VcPs@o;)2X8p!1@i}B8jvi&Qf zhhHUsNY;N9!~BI;MCwiS@d|NA=%x6WiZVJz*mCNtBBP`RUM9aLDwa7Eye3VN^QVFC zgRhAJ|NE-<)`P_Z0oA)43{ClExnwYeq*1JwOtDz8F5iA#oWz9WKaKV~8UpxzrHmVx6tlw%$1*-Y7;u&y($RLy^d{ESNmb zB5amCnuW3As;rbPT83VYkBO1jCW|U^Xg1jJjGUJ(>fuuD|8JDyH_?e_{U0mEuf+!| zssk$VSlZS=N}YEq-73AqmZlMJqV-*Y=2!Ymt@H?}#Oa1GNP(wy;qs|BMazFvgJ&lM ztLksA1P4y(w*OTH4p2}Ivi&;k?xiTe0i`{#(VG05`Wr}W5<>x;ueL|(IOc_6bscgi zcPDsbr>ehf?3G-n6o(?bq*8o}Gq|WXs>H$BC?ac>>}eF@@mZS9!MSwjr|JOjX5oT1 z#q;2G#q(h1>v;ZKh@}!dZ`UZMv1)>W&Utc@&23Z%&mXdnO`o^ag4@XZXJw8Ro!W# ze8+rhg{d;v&LO=wQ<9U}{+E>GhvYjWgk341BoCv@JU^pqwYc0cLfrE|s>J=8J!r$} z$go%m4%eniSOz$Y{t0m?!>?HOE4@qAc^{&Nl74gLtQ3*_KkB{%nV@A9aB@iM> zaSXP&9t^n5Zhc#WEFpMIs=dU~!GE}D&kNdH$k^3X@v zkj;^&nX5dKGkRQ-gBHrPF6Qtd?f@gvKq6=y+)nIvDEW=URzLAsjGv|BK^SJNxZhFn zj@oZngcp(}fs zk->)J=vX1*=T+on+Y!C1UWv)}IxMdbz5`R0rSjT4+7hPmyIAZSOPHT!XJZNT*}KrJ z=E{^;A;G_m7V$7!agM_h<1JZ#99G_D-#30HCOGa^p@>opq0D9 zCg$`wQ2se!6VozBS(8Q-xHNx-%f%4GY}X`CiO^Q$ zaCMM_84gTlzIs5;#uorw6cz|upAp&P9#-X9Em&y$@v@C4Z{=U 
zigKdL3pt$f=V-$jzppPYIMn=(r|~(v0)zj{iUNF@X8G3?%y(&PitNdvY4`|MmA1aY zU0xR_i$|^P5wcQ(u>ICIc%^6%z0qnNG#C372vR0a6gwT6NPWd^H_yKZW3 zqrgE+lYd)pj7pH#%8SbXuk3B6V)+(ax*Hc4!%efW=1MFtO6w1*^y~-!zRJ-3z^pTh z`vFD*F2w`u3>xW>%~fSxzt*66MzNL7Xy(#b>5P`SFxDw`snK*hc*MKC*h+`wS-rt4 z4*zllLs53I?V+;LDZ0^0=Xj8nP7$nh;F1a%yUt37Hb2TrN53e=N@r}bmCi8#91RmJ z5-Vw?L(3dQf+CE)xrCra5acB6K!&MG;Mk+toCZDK9IP-P>%*qOO2;fh0#-T!f1zwQ zbxHkoCP=e_taS2WrL&NUV;VLu$Vz8kz)I(5Nh_V`RZMo3veJ>;rirA)z>37|`Lohp zyyzoI9!wUsvkFO-S*D2&7b<1~qO5fg6_ESJL?@7K@&Jr;@D~O@?qWBj#zZGDJQx!l zrW<6Uqb!3k)rrJPp@?}dl5dQJ%I0!#cFq)MM;Yh@XUACO1hPY`9N6RF@0k+WF;+Q& z>=>&YW+%uhM;QZ|?9c$nn=LDh66s|onpt`d<8oxXQKC|GK7Z#clN-5rI_4w2D~V(| zb(BbmUc^_6v>{FXe334*M~Nzlq@kH&JKy8ZdRof7!}nY8UbY%7B0HjN#}zQB%IEp@ znZ|t`8s2Xv13JgCKq&UssOk-NWd2ck!FpM5hDZ-TN1_qZ9t|6sNX*5On0y)RG~XzEepT zuP$WpvO%pcu(I%*{Lr-yirWEu%9uLs`cTxWy+>s@Xf?5itBK%AHICEOIzW&ndaG2uwV2p3t9qK5_Y=VQi8DJ&h1dwYY}h zI#jJPuzFBy39L5o6;~9+ONnKuUE1_;3G=fuU&`{Y9uOPiR^C`kH(x4X9tw~Lzu#KJ zJM_Bi&u3uuQ!L;pP|bovX{C|NiclH-CZs?+fqCV_#rzQ_9(e zA|)ikJwlc}CtA@sX0~Wall6JCv5@#y?wl>svFLbXzetUGGa@y{Y6Huj5b692yMQC) z6LW+eu%-{=O-3+0f;IivIYHL+MRT+@eK=1fz%&NJ8r6&e=8D=; zBVgfE&sh+f7B0&i6cyycxmYcIA`9kfoBy+O)%vFVm!f{f)m#}lVRc!2WE`XZd2?QT zO|E?HOOaV&Yw0Mt`b(^2pyQ9*B80UoS6==So9|eNq|6gF9(i*Z?#K=SyW*a4OW&Uz zf)z1t%(lE|j^U>55ST!zcjt!V;_DFFuZ7{=@(Jn*E5~R&UD~6b)cw^K9SKaGCwf~K zCdi!c5)Nj z5~TB2t2EF(0ENHDl19sZi*=2RX524dAudMFHF4`n>@DKrREim;5er)ahWV2px)rE; zDWH-yQtkR0`GrZ8RbN*taB{)Wap*2CSFCHHQ{SHb*U!g!?ReyES6DORAyDzi;BMhYPXWLpgB>Ll3K}HfG0X5 zaEz#!8sKC3z-i4F{g)x>ElgTdty~IK`6WRM<^iOcr&4L4(*%7*zR>lYQW~h%a@58l zg5qU|<+?VyGj0bNJ{JDHblj(i6F)LrIz^oS@|UGpc>V&=(;JFw1gGq80F|tf@&SOU zk&js;y3*m4;fAke7Z`4folHU(3SK5|1WhOCBCAJ(Z~*>B(0+g{IPLbrL!N|gE> zOI<;>S*c6yT!Xlg%u+e|hKXy9IPni_LiZ5pxo#vrgrF}-OqT`Il^a1zN&OhJ31jy) zI>iT8A#NJu5~XLYjvI$K@t0~N#f?y$+(>K-L8JL$yaHj~cq1r!HK;F9*^-&7HT6H% zBJL>57$^IfxO(eAY#HMc6*0_@ZzR^6pyBLDwgN$yen_l0!Vw{!UXrv>tIq{4icV z^PQ+S`PPKUTif&~6YsP_Lc_u%T#-@HF|lRh+-1wf!@>)8K0V4<@d>3em*T)}#oek@ zkmA7a=vop7Y9QTj%7J3sewIvVc3z{| 
znH*^Sbya>~p2=+1k9o4tJd@F^^?5RRt;Q=RTk{>NNR3m}U)`7)#bn=p18bZFmE)T; z*|0UBlvI+*mW)JPR0$@tW{s2`*6AFOpR8#k<-6vYtYiH?Ty8ecWE*SI1R1qn;}y$r z&S`RdbCy|g6?LpT;>}s+3$jdn36^Q*V}0nx!4&LK^`zrXX+g8>2dP) zvUXBh;9w%ZhV3G@N^pJ*`z5f4!Wrho=#}!ZHn7A$dPO>6ZXI{MHFk9vPtykb?hT@T z-1Rotqxo|2Ci%q%9Pic^)-{ZUbs$eSH?S_hQP{1FS#=QnMyu)<{1aWQ^R6?h9<&h? z?89>EM$shY^msG`sw!yBf%S|R9=r5P0v6S=n=p&dmen?i>G*2hCK&uqE09Mw!8mxw zS{b!j5zE{Rd-22a0R;Bp!1=IHdIiDh3AIFOg1T&i+4{F~{AN*wCiv^`QXlQvER=U| ztX!IK1#|Zn(SuI;mG$)IE#l5Fp6Wg&ahvY~q%l6h_E^WhmnXN1bZh&c(!EWjyVmfI z4<4a$Cfb#)-MR9KZ7N^c+eC^u!xIg7=`SR=qTJr&U**#A*0-VZjYK=iwRZ_RD=AD* zv}?%F?XZn5!op&^sBO((BA?ri14vgc$Z>eCTyR_shrNyCHSaL2VqnaTeLz;*p6xg- zj+yJ<+p!)hI4)~`FEXv%P4cPlMU&*ed$AbHsnI(`M+AjCa5jCCbnO%mV<+yJouYb~iF9sGpR92P~Tu)>97cf1MNB^Ke$vR!a0fX!@+W_jW6aq`G6@dS21TmK-YI>-6; z{Qy**916h<>zsvihzQT{EIPvT7QFWOOCH(=>uBY`dgYJe3=2Jfk4R;>YmcbP%DA*g z+{5Q~d&M+{$M%ZX7(Ta8<3{Wgb%^_VpJbGv;8p59wC3(U;0YE!hs;;Jg($noHyYB$d8a=2Vuc{NH#qv23cbh zWZ9Q+6nx`BkydVO0y!Axe)L%%w(-@2$o|b1zPS+)5o_T`U3R%366N5dB3jNl1g`_9 z|BFRl{)WWXzo!8h1?8fc5-qF}LgI%rhd?d7J+2 zK8fFL)o=BQR~|YB5=FI3ygetSaP?~IgXPvX-u__UTr z)>MHUcM?S_hv*EQMg@6*K*m(lM3`0C;1po|x{8Wa@6+}B{L?5(noc)W1(Q!3un3~i z5s^}1E2~&I`kjHShSdPk6#aFwrtLvYlKxp9KP#%AzTUryk3(E$pqed{{uFn+{M+pE z?a#rwi{uA?if8?Yk9x}*U%*JcSY7t+S#hiD!uw|P{qz?YeXiG*{T$GijXNjOuzL-$ z3Hx2SZ#Fb-{}+^I*p#`c((LkIqFr&`d(RcsN>`oztW;;kTksd=25-Sv{!-NR>(PSk zj-g|ECqWq()4i#+I*o%@@wS?y{T5N zw|}y7Gp!uH4rt~6xDwFHk!Yz}xq4Tzg)nFaxr@5mTDR&XlT)4;xupZ#QX`U7@&UrsIW6 zEALQit9-$=wjb}R8YLd-m!`xs^ivjrkBlmh(3`6A0=0HCRUW<$sPevdq7sZMk5o%l z<;7V58C4!ZrK!m`K|#(B_B41l*+qh zDC)>4`S?+3Ny*-nb?x1u^Ol!s1TF#U zR{;`wQx#yKj&7y`#Mc28;QTODi%|iRYN;x~-w84*K!QqD0oDmeT^SW1L8YkxO;PGE zM_n03ARHS8@l#*CacOlR>MCb>a9z#8yQ)Sl$OKAL3z`H@0_s-|5_(hRV4$vUrX1wg zr78z!M4(QLa*&`>RWk<(GRi@MN>$BNc7f@j9Bk#XgCu(DO4N~25oWt=wXNWdwS`No z2vIxPD{+zutB)+i`jv!Cq*Nv0a)7EDB_TnjDG5#GtzLtajjGTtqf;IqiIlaf5GmVh zb;{ci>sJ<%XsLRf*eIlIl!XM9rpGZUmtBpNjmofJluo&R98%UQL!{hqwNAM&V*N@( z5-n9}I0vArMrlY;X-Y$r^89s3*{BZPZk_VbXr!!Fhe+AIUZ=bgv3}(tiI%Dfx&lyD 
zqdX+2G)<66+4&t(HY&u9M-?uuLPW})zY}%-Pn3v?Q;ialnYlTq8YLn@H|5l- zMu|vJX-Y&>EXBS?l~{s(wJH(C3T9s_6Z2!W%q6E%nRo!t#e*`Lk}AbYWi(1L9C&pBU1p7fUdtRxcvuV1}fE5ymcR0lvHEns(5=lrtW^_*Zc%qDZh3z#7f~3L#z~V z6$)>GquQ&|^(zIcy$rp#<&Z=qx_60hN}}D(Y30wC&I)!FhSe+B)fl#~06_kHU(X75 zs#R{rd|1CL7rai|j(7q1Aj!V9MR7cxq%b~!^Quc=VN&9C4#L$!#Yj?sEz5ifpVA_T zGS8SQ%HCwVZcsebt{Oiy*{+sYOo!@N-14DFElakmCl)8pS3FgQROR_(yViAIL0t;R zzw>32lp6@bIx&K;2BzHb6^skjs|6``+I0lsfiQ$$ow)H8j)bUJsg>-+q@Wz)l$wp) zqjm=H%l4J*BvtJQ!vawiLRE@*Ss?u=HCGH*DVAqY@5s zx1i6h6ZTVZtCVirzQ_p)Xq-&m4SLwN?~8oTJihm7(^T78{3zR|*@e#WK9O#p41r1| zy1L!YDSZ!Bx4&{iKO0fQ-ihF+8uqWwNxro;?KOz?jj3&igvCq(No*?c(9>k!l=}7x zXUVAN8rbVp6iw{l%owOfjqD*7=q+e$UqbL@6T1%rPgDBpTYKQ=DwMHXN_X93H9tsj9tJj`Td{=N1RC>n#@68Hk`3vTeo1d{= z#ihfG(n?oZ#&)t@fg<`**A=mnyceiIIsK@T`yItDY^QbJp-+u|GZrn!M5k^NnppWUF~V`UG?g4I|bJei*9y$$`bZ+ZlORp zBdgi!-zwiK2VBiP-OWw~E~^{5*K(OL!mce3bhGb>!8ho_EEpU@*L%UP1JKhi*e}Bo z)ekS&%@LG+(N2orkN}O`?G@|X{?Mh=3$5+Y8MwZ<<%@RXx&?fJWF757+uXh*wL#YF zd&{k|9ZLaFZyZYlvHAGs!x!zUjW#C0uXLFZY=mRu+gU5bMJy&R>Zc~ipHmr)35kyO zU4GFHwVa!zv!{Kpwday--_w51I(5mnu%}(#a<1`h>TPdwB9whGi?EJgk}LYz@8R}> zj{WUJ2pSHs`&jeF`#v6EKWpLYsfd>V+bZYHf?swhinsZ5fNix}Ar;+QRPOOpKP$vk zlN&$8_v%Zw+j4IAjd|Jbz^wiBs(oMF_JmZ6K*~L*d%LgkYj#&F;*$jT<^(vWX*JEZ z<#(^!%dBPld_#xWV=Sv+pN!3h7+xb!WZN}}+!;VN8*0z8_U!W=847u5@A(NMTdfcW z?oYwtK|Azzhx)hZ+x!`5f})?~@VCG?Yvd1a*>y-~cYscfVRoYPE7^J&gCfqh!Y~xZc8rqV7*+)#2!~{|BtrJ<88z?nUzb5#VCQ zV>RWu5q5%e-{i0CR9SN*(kPS|i2BG=Bkgt-RwSUa;6_tkh^;ytiX09`zGsx(Fm*u! 
zj-XOjE51#j4Q~XcDj1*9?iDhBlwA?YEFNWh65#tB@CbdEs2^~06gtEK89Umpg5VY% zJUZHLgad=`jJ6+eu8;>u+X9{C+Gy3qQpTvK=40^P3i-5t8Zt(`n}Vlu%aLPm4o33g zTx=w2g*>5ysJAhUFOl`%RyEN1ZACBpZ9AnL>IZWre5!mHVdz>R=K!qZ=@*+MIn*AM zzr1Z%t>P~cg}ncjq5={zX2$J%tpH*2hYih=?;#pDF_ zn~hP)J4y{B>Z>o`vC{$H`Hq5LdI!z2RVmuV(7`jm|FwfuY{%DYN#orua>C4ZlYe)6phn`C#wQ`brMeUP{dCZW5)hu9?h zIYd4%+3twoIOSE}=6MPH@BT)1l@BXcvr}ox%P($+D)+^NATekd!%!PujW)cHiUiq z82Yvk(!{HtPWXGe?~{D{A0=*12cp(|WQRIYJsUn&bi-!ax8VsxMk0I`fvJ|u=V#h> z`9;Xqf7q{dPeOOYd}WE8I#Z1t%VuKXfaXMGp!u_}t<1u(adxKN00gUlf)QzhT>FXL zsC+@fSe!6`Ob_)B8Y}bN1=9T~Dtm)$_^I6}v4Cd~6v97j5b=gaeE6wdtHq7uM1ZUs z6E=_nMjt&u@WG507&&C*Ec-PqCWpIkIX{p&U)Y1;G%jYg-5LQ(jeBPt?!U1`sUqgj zhS2$2?w)O@yEi7Jg26G|fw;3jFp+cYzn$kN&$ZJNw_+$^>2x-*z69XBugP5d7blbd z$De%MOO|s;2z{XPG>m(yd~lxqe2WfVM2;rw8nO$AvdhCTfG>4Cz5x0 zXF*^_vOinu(lD1dUq!$q#vg&%I1qt5v>}_+s~k|})zmhfHO#|0z$$b17jQ zC0nkwZ}H+Tl^&^3Mmcb0mRb|w(W#D`fC{Xe26->T_(vzoyQp!TXFBe`LB^sXW>}ng zYwhmND4D#@o*4x~F3va+y--ud@R0+u=RN8O-Wlv)T}i z$xm9eTFu2h#>VDFXGodpVxUnMen;~fF;_ij#bU32F3K-_>rQ1Yqj59vW%NxmoP`|Y zq8+!fz2dmWth;|OXJ*JICGwAR^~K<*n}JPXAh%)f;BNuY$$;=c0H&r~ba-85;c)%N zMopAs8dQ#raB<}T+4kCY`xVHhv)k>lv}YUoy}b*MkL<9sVNUns4tp_UC+xHzVED^U z^>)cF`yIS(_k%qK*quMve>ng4t=)}z2SNY*XisA}d5^tXF4}9~>bxL--)qy~}+>*xz&$5tq6z1;SX(a^g_P*tk75X7kiE?bnw6-QNAUoaRGSh zO9Q*wZMyX8{z9MIx(pm-59(`o891=(fI)V@zTNu_>N>#orMOc+3e&iLg9i9f&k^m% zJllPc?eit3KHeds&U4**b$MZ+^u(lle1p5CK9o}Dxh{SB^c`e(@7=Fg_vg9~>h-Gq zT;D$bWg5swfCf_Lqg+2UwNVChRUN5Sx4U)k+Ut2{%m2+l5b4T@-d*~<>N`6$^*yWV zfUW~y>V?d|+OMnKeW3kPpPqgCzTC%t{v`zjj&FQ}ho$}%lB5a@Vgm>Hi}HN;fzNdr z@Vx9aD)p60>h*v7_UbaId#|q7BML{QCj0h}N`0Ya4OUxFb(l&U^kUaQfshdv+t+Gc zYQB}G=mvdIqW%VU?K7ymZ(l*`-N|?UEAYj>sP%!}U+e0)0rp~-J}(p}?(-Z@JyJ2U zZ@)p^`}XP5tCP>VoSJS$y_DIBRpIEKDaF;)$Fgpq65Og?dv)!NjHznX>2(>%itjg| z?|-{L-_`E=if&LlsQMjj!`?3;moIktZ&$mEty>}(@endl_l^Mr`VM&5_vY2qS}D42 zx;+22QxivAad|HB>LqlIJK(UJPW;fQc}&ojbV)8QSqA3q`z0CPB;4)0t(I?-8os2?E? 
z@89Z2sKc9kfUm+F-aYC^4Tm@XXT~&ic#r>rANMu_iSB)#>-Ey}T{}JBU4ERC-X@~p zDBd)=PY#`s-UMF5j*XEI46BkDx9Aw6LqEqq&^<9mrVgu8CuEoTVdb2Z?I)y!8qC`ICVykv?mRnj@x{Di~0)8ReDmkHhVhi!k;IsP!-VO4i{^UmX)&7y=F zz?VlfAVUGrkd6*kmcv`9fGnrW07viEK!*W_IYP5R@B#_495KMLY)3zHs_^M@4l4uC zE;}FNcOh|t^-$K(JGEKUn+sfZ;_#n0?;>GYq?2gq{rIb<_bSpQJ)6x~NSsZ^OExgC zNw%R$)>g^pT_TPQDNM4i?{u;w{K?w+si3EDtju)avY1c#u?EH%AP54OcbOSvTU9p+ zmpA$AVDjexXL$<=^NHc~DwPb92^4d=#_d3u<#2QZtR^BX z+cApeB{C@n5sgi<%}(kLH|slx)d-}n5aE%n=5Ya9R2v)%#hU;dnW7T^L6~c`d_nJ(6S6*jg?Q;$b@+mL-ZICDn zzykoVHv1tBhDRQBSam@`0m%7}1Hc0J2Y}&4P+&2Jl0sHIVmbrC{QrqeF#K|@!-6m@ zR0s;v=Kx?X*B}!NI|6VENW4iBiHLXs0MykauZ?vTzzyC)r11CUUFiU14-fcgLXpf1Pzw>p{2ykVq}gcolb3QwVv zvKWo~Ij#SIf&wHXVmJU~ly^^PxzYXuN0wC;B!+{8oq{*xOd8dW>ok^~cUUbE?<~6Z z3n&qnR0+?MO;V>#jSbf|mN$Zc$^c9OfHk$k)Yx7CYJ-9Tc<^GH24p$973mszF3w@4 zBQGOKLI5!X2(n@31&0bVP`8aif&kmbi#Y(W`__P{WVjTJ%x)V}wDK?rjUp};&tCz@ z{+qqcslwgpW;KYjm%T;#LttteA{H57O^+UF#)BY%_!o)+G;cJ3mDBOG!lc~jcb#&` z0Efjaw{duj2&(iQ0BnNcTeU=&y&V>-1^?V2GzOR?Cmz<3|C#qke$kZ5_9rE8ArXQy zagzZ|{80m*>7$bhWny`66UKzL8`%3>H0)s{$Q-FW6%tmN&GCcDSMSL>UmLP|=!~&= zV~JvB_5eln)iYX}Kxy}vAFxXUUNRIE0Nj=3xCjKxQEq~6^!ax=tn$c24oR?2J9_9Q`gD^f(aDfd z07|(q18{w-0SDTtT;`1@1xmRn12{QV0}3>t5P+=cBucqBgK+$*5nD_-CXfWBTmk^h zC1xTj%+|RqQ~*l3WJAGgSkh-GyhjQYa>7tpWhiu46tcX93PCAX6$sXP0VEK^=P}ht zLrqL12}-%ThQ$4tM<^C}++ii7ITV0WuAu=;+M{`52AGvRF^Lo?c;GbE~@986;cIs{2fCJ9Qpy8$3OZTvx3Ra-D;B~T#1rj)xM0QQ__N1ZBs z5ELl0@UIOBN#ZE$IuM8P&oLdjd(Pf&#GPJOKa;6JZ3)l1{o%Q{K)33HH;@K(J4YMP3-bfxMvZ3#Tv* z8xb!Wz-UMm0=`BEXX8vIfKslX0nEazgMcIrC{%znM7#n3o3$2dmf=CvOFHCV-uonx zj+mi9u;)gg<`~uof-@EcsDg-5CXJp~^;jGGn8T_D3VEbZ6@VNAn15LVRz9i#g$hs& zfLsH3)=*{H=`bVDd5euOm7n8TuzM1$tB7 z43fZo=Z>F1f?PA@gzj!f!kwzSDL^J(90GuhIqw-Qr&o7TY2_=9n79-wKnq0F0f6l` z{IC{bzxC5ybrvabHr2oYHvHhsQphT(ad1zeLbL*+DG;oI-{oLEYACG0c~0R&g5u6anX(nH6+rRmQx}2_Qe*V>0krg)Gfl%UA2xREQ8D9sq*j z{g}To+>Dt!*f;MBk|57L1ONxvrC=?F2T_ll;1QrE5uE^F*E##NQ-z;**E0>ye`XWJ zD(amD6xpf?SeRh}P~=dc3W-uPiPr#PtA5i+w`vSJG!_YYb4Z2-9R>i&l{15e&aI|G zkcaIw#(>g)(#@hkI(H6xF(%^u8oL 
zPG9p3(v^E1l12P7i!c`aLjyaXa~+Jma2`>t%vlE2_wDOY7}*LHit~-R2DSUxb*OC6 znh#onbB={XvHV|bzYeCBy~L@h$I7>K?Oz$9Yro4ITGV|-0H+>HKuC2u$XkZn+)l;K z3c#ty8k3*9J7{kE2R)xttOaWR!>PwcAc(x{phJZ_YdWpgC=w9SHm4r@0U#fa!&;Q# z-w!Yo>L1puc?*f-45Y~5o@%CXUqG^(F`a8$;?zXsFmUYMStGn!8udYliFe-DL~tT< z0toU{4Z{bS2r=LmDgY-UX8>RW|1y^ITFjOaCV;{>q`-;DWdK+or@`9{pM6`8WD3B{ zhxZE2QXMrJ-Y&?;nFyh<%JY!~vt6c_mNoOy%gDoLPg8^AEmQzbJt_de9^D%%1cnRC zI63vmTSN++dL#kB3cG^rGdv3b%2}uYg5ysmgRs$gh*%w`5P6G9!bXHJfcoe-1Ps-H zLIvQ|Bh3KXn>2RnH1d{^LK*;74WKT13n@%AnOLIcKk10cFo^Q#B}8;FB$ko{ryjKo zAbh+Acnt{!s0u(`0~ms?K?_G5)a}Ye<5f*k=&Wnu4a8awq`h^lE=5OhGT2OspjZis(%S z@Hu8vOk;=1gaS}H|6l+IwgN!;`?(>pjwC3V4;sV?Oyx=9O+!KfxElP`0GgSCK4B=V zCk2Y+vj!kdL2oe>6o7*EiUDNxhbDk&cnpaGlAuVA=&c3EVUL!iJzrC^j3NROqwtb# z)@z0qTyK7)CrKOlHMzK)0W2%jfR8nxPyxun$r_O5h(Z}iY8?=m;^b{439@e*5Nwh~ z&9(mLwq9xqQba&vUA!b{_lLTx{_>8)x*K0_@@Jqv0IZ48_?qD!TMJ(WUMS7D#ONXP^TR90|(&&VWYW{&-7o&o-f zMLL0Ds6J>l3l)I%xd{L=(;fgA4!+Z2wE~50q`>a<696vqUvuP2HRcsPG$;gl;xG`* zRK-j^L1;-nkI4+il85M00$q54xD1K4N)pQ9WtQI1^%us=06fUgbUH2|RBEmQz@ zC$9laH-I-#&bAJ3R^AUJL8iC|2(ol4)<+C4{Gd5j0XVn09{{r5o_F;MtLAQpRUety zO$r=GIs(9!{iCj)E%+%Y02%a20LYJd@98PaLld){YFhszNpMor83?vZGMa1~ajT6v&7}0icZPyq|L!%*ckRt)fB&;GAXx0Lq! 
z@5hHViM=GjNzMB}u+Q#W=r3fr`~U!RmsJDh;E%5#(yEM~NP?4^qXvYFn4jur1>mIS9{|YX z7k|@p*6I)wO^}H~Qb@;(kiNPY4<`5zl@$r);t9FCFDfp-Ln z{UpIjO>K~1Gkgwlz|aC)ajLHXoYXW0K#g0d35Ly36I3D`AO+58?f`&HGaj|guwSfY zwFU(R$e{fB67ze$X|`Uo)#oqkfE8%S&J(G;oqUv0eU3Iq$->w+$5qim;IA{-)#I(RX_ zq%jEvWg4GguZep!1k|mIh&KTsZ_mS&h+$1E{mGzlp{rhbq?~JfOlYq2*ErT>@*A?4YeYtbPACP#6*+ulgJqpe*9^zD>_UP^%3#00kkxLjNxj|SEvBD z0I(1MO3P=E1jE)>wHQ4D3Ki-$K+H-YnCG%a&z-mpu5r_ovkD1GYKS*$Od^LN#hHuA ze>ko7D9A4aG(yBKlgJ$=k@FDdT&^epl`97UV10}+9XDgNQ*F~8C56U#aTWjy*v|oA zxZ?_h12i9nMAU17m@7b#Q({JGPI01&Dg5F>+w?1+mR&X`)OMWt(b2ihFb-A6NVq9p zJNs!K9RkAS(Xq%Ht5^Z3a)~v72atCHQcywc&RNGvqB&xcfMA{`n!L>e31&?JTHr-G z0IZioh6VfPsm-FiUr9mfTmYbyUH*q2f?NHmMZ5w~<K4?iQ!@ zG>UPW6z)Jo0RS9Wf5FBL!^i$s`-p`Ka3>;m8^D|_Ox}VJdB2gwT|gW$h{n~ma`V6) zdL5(y?Ev@_05V%^tkxKgcuUU@eB1Ex(c0Vq^}{{WB-Ko;e0UzCI4PJ~b} z<((mkdk|3z2)@Vw*Dze#*I~6lO(?*7l?J2^LT;G;NfPZ5(Hj6ZLnoLRFnkRmX0e3|&;b!6 z4dBB`8n7H8r@3cI;Q;_<06^Ja1?6MdZJ3if^+Eu!In)s`i-2JEKEtTZuyCxd%yT61 z;QvF{nFrWd_W%E!$*CqwN@B_~_I=6HMw&rVN|NlMLMSDrl5)n__bo=bWhWs+MT;zB zS5#Dz+mdM0GA&B7{9fkGm|I_I!7xn2 z7X#Yh*ao2J@1dVjxSI;xfk>Q`!o3)d0#qRU2|!^nnwP;5HiwL8i{+yJu4GoZ!S%_T z>DIO2{f8vl0i*z^^3G<(P~i^VgFW)lfc6+F0H`!|<3$zj`7vq((m$op0YhD{(2|B! 
z3KQa8ZA2a#(Gknd5UMxd?2THu?N2vyI39@GyW1O>2O-E+W+ORNljgE_{B*OZ-YxBZ-$NcL2d@Np!_95c0@QpApp{p+dxBH=dbcu^fB7PJM3OQxB3BfTq|pdO zGLY3>DgBiHow!ZuvWz06@cZ*FYC}alj-eNT@_)z-R}NM(M^)cyKz|&A0aTejnCmK4 zD$_yL+hJ080>>BtwFwPL9)*1xxj7s_@%%NP#Ig`V#kCf3w_>|H>$P_oodmsE4B{2}oAqZvTV9psReD*$<&jvh;As0Z+ zP9u$##1ITmL8#Y#fi_HG(|g=$Bm*)ri~-OKK1rS{Y(lDYQYaWFg`pT`d*C#q3<2LW zYf!B-U>Jrso@DAG&loT9P6fAgJzf&SaqK`sS>BGsRCw9%Zdz`@2n?SCsKB-N1+USm zXwm~jVgd>^N8{; zJ*oqIM+6jo1gGlJ=}+81a{V1P(hOeE-BGnpgCSG~%Cb_Su`qu%}_v}0BIzqlvXvhDikJIQ#Z@m#h zFZX^q_mXRGi`uz42)8u7+e@^o>Uyb4yktEhktIN_RtTUM9kR>4=uPfLLj%;c`~;xb zE#@^8KE``CVssSDkc9e_(-6v}AvIlQt!^2$)i?t*7D#@|bwck_p>)F!06PEoq7>A% zTn?b6jaVgZ;IN4d(3G?^Ko6}ApcZB9v8aWU<0J){6}%(~r9cx1g}F;zO0*_1b%M|U zC1NW8t-!p~%}^B#0#!AvPA`i}r8B`#{{2YD!{^)f&OWiWs;vKU+2aa|m_+cxTg zKoV-Ura+V>T(?W}Pc{QvMYS*+8X)aAJkWr}Z2?Uk5X_Q-Uhp#y>{#G{(|0)q1FRG| zfWP(5+O;II9TB`N2^GYvo_3KqIKcruy@UbkE*pB_CI-S%DB%@mOF^UXE&ys;Vc5tEZs>)^0z|=tw>I8A_(Y*`$QV9djh1!OkQkat`XFl5a1)q{#FFkNSSW>ZIGO?| zV=w#3^+)$o_Ty@5j3|$%gD1WwJ48N6b^rv6BvApwqaG;yt!u1a^Np1O6#<38i#%Wp+tu@ov{SOB2Nyv`#C%bVL!J$O|>61VI}Le=w~18!9D0ijkN1e8>4z6Vgvn9e$(!qo$`nyIt$ z9Qu%oTI0w-mrvD*ogE!$cVUtZH;qthd?AEdlJ?22A?(WOJTBmi;RVptg8klZ3Q84R$?!r+KcYugelaWnv@1Vq>J*=QN~Lc8idu3QPaR z>X_CeR!B=$>B~R&ahBQZ_xBtCCrGVZxf{NP=&g%)_wLs+JEW2%K%Pc{uXXFv08)@ zZ60w})!R*s6uv-J(1|$%)VwbM(4DIR6fWHEhM8-mp#FI=fMWI&b2o*ZKX%LJ29#9$ zzaB!_d-IL1d4D}q8OHGsNvL^$fLN+Btzsyv@R4>_12zk=LTRMh`Pn{wK3U=_=GxDr zTB8r%mQ*Uhhdj#$BA2;QP?Mv7MOB+ZBh+FZfY1QsPnLibKHbyJ=HEernQeGtCS}9b-aM-6v6;@7^A`Lq;k>nZcL|7ghS_xO)osnxkx3!0J zMn(~Rk6X&B;r|*@wam2{a40N3&^3QtGM$K|e<)I4ObhLnDp<=AQ8g@+=h2YpV%6%_uOmwrIz6#LPpygLE&#p$ zEPpKEBkF{;AJ`&9G&HgnAKH1cG_#wyZn+4pf1|-hiPpw*(x>fqFZyK_4KMt^QHYAP zotP^#+~b?kw@CAWr)|m^9-8w4^tCagzhi35e1h1zV(bJ!D^>jEC7KP-&mg!Im1=3y4xeLnCj% zGXPR~_aLpYTBC3J*wExWK-`LbuvMZr;u_>1|J(@|!S7gsAb#rwSQ$350c55pryp_T zhYuNfK0yZCq}dR1w2$aDpSg&pJ!*6A^#bCT=nHxFj{+ImJvZ@jvXAnQESW3H%YBpu zMCXtU+oj$JPZp%A(A;9KPA}MKU2iC$>diRjd8ww=a3@40Msw7|(jIxzcOi 
zW}5Nz4Th?JF|-Njc8|{VXnyzjjQ1s~MnM-9nnHf;$$_3+`G6v7|1$Ky+6BS3`SXQ` zOL_PS;5d5)86QAgoGP9L7UcE0QZqh$ z-o&G0KJjFri<0SH3qc9koWZR(==w9 zT7D{p_5cqKqJL4f9T2+edl73!sNmzeK>(17~^W&!AB=PY$E{U{M)h2)%o3zb~foPHs_2ly}_T=nN`aE0eAz3Aii z=QGiyR1`uf^aOYhz~(5-CyMC(r*Og2XlTSk7`77|%`2{{uY)L&NOM9hvLBR0FAN_c zp|bf6#aZFp=iJ_w0lhICLP3+NHpAVN`woW787Lf*LLVIaP>2!6w^O-vv3|WxNkStY z#t=eiEHR4rP}r)umaEc(!;*Le!w&$;gl#8WCY)e~pk*op`r;@)#Km#|(u!pf>W7YN z9g)JL7=9hZLcWIe(Xwt>Kc}UpSe(rf@fePCJVS$~b5q=)DL-?sf=GNRpdXI>&$+Da zM;ab7Vt3b_s1v)B){%U`3AeySaFHdpNqjw_MS zi|tDx+}B#=lR= z1EGF=&i8KYw}y}c7#`Czp2pFESn56I(by=wf4-aY9T)HnhOPi=45RzqC}$Rn8+5Ll z1z68xq&BP4d~^r0V-{Du_g!VpE?%Pk8JBSr z0JNn1jTD{(=mVhYu{U%Rj6*%Og2%ZV0ng(Y0Kf~!Bh{J1>doJSP^x|_VlalG5K62s znb9dc+uM~;15|Iud*BBTJmi4jJ1JxUt-Hcp_JZScgls6rVoo zCqCG?PZ!B!zz&055J}ILA@+7b?FyuCPhg>8W5JGkHNz@;wVxAex`-!_gFEKsWQnz< zwU=Ci3w2@pR^ON$gYg_U94z7$a}$cXNST1(cP2>p$t*^xz5No?TxTQ=;r7HW@zCy> z2=jx#r>no`4R_CZ0hdGp?4O3ezvwZO`1muosLqA7I%?tkSYf?@OH@nkAGd^d%Va+O z#w{u+Q)xBn9^;W}3`J=3&i|i#f?rW7KLz#__eiZ9^C#Uqq>$Sp;ga#Py5CT2%2Ym| z_HVJux5}5h%o+0=faResQeJ>OD^l(BK#vw9CM5j&1R5GT4I;u4uio>4g03Z>P0~=h z&kJAz<^Pec!S7gVOvjMVuL>9X(%sTnxtcZkK)EYRga%~+UF3DYXS6JteEG8`94MF4 zJUEF=`59R9d!nWvjXgq^Ux8i!cy$_#P;Au~`FyE=WS0kAZd_h4>t{VL_(PF;3F30E zcGf>HC4W|#uv|*BP(;d1h%3ED|NleQEe7P z2~V71Hm=fiJ^p3vrTs1W>F-o-eeS(4z^1>ts5cu=Sx9Bc zH%abg(``f>iCmldk9wqV;WRBF+CMdCf9d8qeB6}j3gJ#tN-au(LRl>jmP@@m6j5U? 
zmWs%$5n9URL=EIfh{waoMq`rs`p})7EzqdibtQDx22#r__@ueqvdZJ>%tP>)Fv(k74;%iT|@x<8z zS(OeehDI!bIN^!Lp7_M)O{$L}sh2l~=cN8B_vUi%1%!8SuL{j~1+r$NULq7xV=0!} zzKkqkn?rrkf&y6w4wp+Q5u6vX3`<=I<p9QmqhzN~X0r9sdmU&`V!L0X8V~8}eAtEg6s=b=wiHQZX z{xS_CR^o|yVwfj-6wIpsrQ(q(ja3kr_@&_+A(S2)3S>PF5f4R_eH}{!UlR_n385O% zlAg6Vq5X?8h#v|WrKJdTQT7e&H*<@sSCLm;HQU4r!)*t?MU z=Y6fioDd%dk(5DK8R#lyHNM+m6~%?~TvRh|xAE?7Lkkm=(9ks?tw2@r{$`0(VXyu6 z6`~2l(TXanx=4KsPX`~>0zRsfT~syxp%|rqXm?0k7b$OJ?dqdkixID)+|WmP1VB=n zhMl@fc?Z{nKE|_rj9dB`8=9fT8(o!o7qpL$@h>jM@kl9)G9LbsWD`e1YHcdeMao*N zPxz?EeAK&I)YCsSl5s_7|XX63F|2SBx={diY9dJyVW4cLfdnOAsb zsZ*HK!VPZ6NFfK{-RGINsQkQI-3{gv%LyW)l!?&LO%TO>@$FO4O)lzlFibl?W2Lnj zU?UGxE?xVp%dO?iPZ$@j7m$=edhXJd2JlC8QFIGD?ZdT^9&A&np{ULc9A`Tx?*-8f z`U>wjE-TT)M7DeJQeJ#Y*2ZJyu88jvU~>Xp*dFcw*u*_2AeNZpkC(2V`JszMWacCn zhf|DpQ?Tn_QpDrCgxgc>KE31~iw|Y@iB9fq4|3yGcH6&h96!O|80f+d+T`82U3f*3 z4s+%hnL&aZsVw=7N2n-W%mS3cnt!-os+uS%)y`wjGv+VLMKaS;h5ea;)J4JwHJcZ* zohNc0o8=0drzRz}AXz6#LbbIZKoa3_h7k%^mrb&>5d!Rc{FnNz-EKF!<%AyrZUIz{ z4JOB>lb=sTz~~4fllQnO_Lyhf3}>49U19k9SF7@$RzNgimtmpw1N=y)%YC5d7f zUiQS-J00+pmoPv*$}$gJyU_t%y}}GBsQkaCO9d7>AT&TDu%iG%$%#ye>#lDvn%#=iF40$)S#{=lQl?J2&oJ2usm`TA@*q4&avP%$1 zp$vu#sAQ@tPabp~T(PU{5jg_lQPo&O%Yqc}bMQ14F7(D9Q{5=T!7OQ&!*CUV+WhKY zxQX#IuEpCWz}goh%R}VdXTQ}|%;06nk`(}KpIzyn*k~_vzj_+$!FGtU-4$Kfer}(^0tR(j@yL=v*mO!;p8wy$UC1X(dAg zssr>k1>Qf~p93m|gib2WlSB=OM`ZHqfJpf-+?aWApuA}4XQsFkkq3y@In8R(*{#$#h3m8>~F zdG958>+sqBY0x9mc2{49RH47&uq*T{iX>^tbnHTDs@2tn{g}{vJzi*Ck3Q$n_Y-Jn zXg!Emd_*s0QAh7uxUfs`MbfH|VJUzbpCM!2!LKaBvP8>(YjJGw&+6o#RhVbp!0$c; zizRU##1{XITYZkKJn6m>Wx(|S+X2)%RT`^RVph~5b}lk_MG7}y$a|>ZD9>OAV7-87 zDzP#0Mo52BApXx$f$5hg*-`i<(rN(l6Ax4NKUT$cedBrK*+>}B5J%n(@%QKqw0a@H zmKR=?)J>3Qky0P_6kU?Si=I~Aaq4Tl_WT=RNoJ8+6@JORuHsjq5n+umSSpE|ar}=x z_0WJcfKn)^i~6HO)WRkxP~d}QQb@;96F{9@Bev8Pz6MZ{^bd__jO8W>HB@gNa}Cvv zM3dADmP?`uj{pC@eR^m>Q-HkN_HVFbt_JxvDck~(ciTRm*ta*L8ARTF`;$oAio^;@ z+zK#^SgHi(>*T7;mo!A_yoLeIag6jp`{J&swxiLnheEa#ZUgw&URm5)qT4AZ>ZOgi z9b$r)SnI20g&{UBIw!!Y0V)Kck$2$9@|ruQxoBNM 
zfor7lze-wnLc9c_hVd#6V<>!!uBHnT2DHTRvX{7->8_HX;FU@CeSz1da2JNTJ~p?| z#40vrDUzJ2*(e~Yp2o;lcow^7#UszL2&;!ZRmN$(A+5VHEQQbzunij$3JWneP@y%T zHHO!HYAGr`aX~XrBmVtbO9&; zpcd@g60QX+!f1kuF!Ws%qNfz`HTl6^&f zt$_OhCXIB5+Q%ii)v=@Lw!s@3@Blz*ADcxZU2LAB5Zt5k|DGfsgs2FiH*HeKz46Ce zX`y%_1G?j=51`V~^Cnkj8`E1VRo6+O2ade^S9QxJ=}Whv0X+d4`uE&KSFQJq4=9zC zbT2>Fz8(on6J6Ms=FC5s4^uln9*E&h}FRlF~N#98N@C}kuKdXzB z-gs_@R4F+Ankx+(DZJ$MmJLdO8)g~W2jm`5mC24&1cjGSHt#@dqqH8zkbkrr0Y1wC zO^pQKc`|C_Q3D>qP!u3P;cDg&3a1UQw#I{TE^ zE`@=NKO>tx@x>A+vA@5)XlTT<03Ujw#2g1?Iw05~i9rBgcwqF44yfi840sOUxCf?A zalk7+EqBHx@jS%e5K6??KX8fp2GP_sXQzO{IAUX5QXTu*N!;WU^L+t&jjI8qFfYd` zT;mlC$iPv-1D!aoEbujA8Rr?n2a*_qrIsh|o#}{n5Q>BWnE*F<;OI05L>v(8lEP4c zJ3Vlj-(7y57nOnm!vNa)`qj6rbz1SrkMyC{iNtP642Q_OyIhW5SThF$MgVj|LJdM) z-*dh|v)T}a52Y{?N8at{;q<~ZQ#k^n>V=IQ1^HmyYd!O!Yh(s9O9LJKkx)kUfiCRZ zat}dj;$8D{O){7iUu|bsLIcKNkd#`_(bN}(Hv?2g;bSR`#V`Rtvyod-P`E#Z{!cBM z5#zAThESu^pXE}8OK8#9%Ff*5BOd6Y^msh;JlWeNW8_^@@fz$}x=P<=JRMA^3)_TT z2(L67>CBPT(ufK);1HP4lX zzes0Bg25+}nvCOZ4}8RcN?4*U`OC7t?}heI{TUj!ytIqRRM;-yPLO7JWa% zw9O)H&}is%JRzuN43E@vGlyc14E9MY3&*zrO4y2%+`wrMfZoi2894sOc75=<6kb%t zJ%xl`=yqn73g^?Sa(HFEh-fNWhQ5U7Z~w3!J{QkY@*6X=+z)wy`z3T;mYfND9#-#m z7sEz{UoqR)Oozz5FUa0=zombG`66~u^rA~SgQM_+{5Aj?`-MjXU8KyyQv_0-=@V2L zg}0KtHON!}7bLOS#bG9~FXJl#tB3ccKr5U?uF{8$J|H#~mM;(LI%PJ_^1v#15Bq{Q z?s40*cwd0U*2p;!C6>A+pUQE!BlJw4EZa8JYJRCIu zR7T=a23)!r4bd8jgOZq!p`MrM;cM6!iW%eC(0~O1*8%ALPY_*&RW5M{X%0zYA%-*$ zTu*KZh&UiLU=g2h%Vh0do-&MJ6z*eQp(f+7Bouc2Az5h*Oe(G@_0qdpA%6=pL?ErWsquQL9PbcaaAGoiIk@lbu3b+$v|OGzxn zFbv=d!u@mI!tPiOS1A$(EWd=5y0ErFXf6fhOy#f#q zj-yafJAjE@-T!+`IEj&8sx7&vD@)UyGzkpP-8i3P(8T66n%4 z1QWZzU*6Qsjerv;hYgNun{T%Xs1eac-PJrQu_OG?A>yoI=gOtF2)+TRv<4ut?fWXK zszi(I*e)0Pw?Y49peMY}TYO0D-{!vJM$D}{Wc_@uT&Z~Qt>XDML}LFo&za%f0+I^T zAccm$1De>#&2waU!goSbHK^BB%DbS69o;-fhIbp9#%WFtSBYHaV`3}e~`fm>96PW zrI0FxJ2)??up`fk+_JW*LiQ%%Utub<}&R@qz6CicGba#07}*V8Lpce z&9)MAG^gwP^*p!gH(%?5FN-1Rr0Z3{rM+g-@P7hp#FhcbYMcI~N97-`- zUfu(1?dcytar+hDXx!H&T 
z8o%vOLD91=Cyx}?yvEhMGm(VF_P>Rrqpu8+?QY6vD6%K)E}8u3OC9Tt_oc z{)83^_LbX%ULnn=`jaSFiB*#x7nVv_8#(H+kU@+dbQ@SzLKe_eSk8%4jrhhtZwoDn zo;#s}t8DCPsI4_lmG4J5rSrUfZcsT8PIZnVem8pO(JNg<>3`CLOUQ5CaMwSe^o-Me zXFaxJfx|BU2UDhHOrVL^#!76Md+abK$5K1V8{ z!~HnK5D~>N6opVWZ{!UWHU~&Y!T?P!O9LngmeXn}Tu1>?(r2QOnu4JMfYS0rj!V<; zs8`y&re^7=HXJ_-qOn+2iwxbGkINVyrQT$y;&L@W9Uqs~J}%qH9lef#)Di#<{iB{G zdGyFsfXamYYl_e?360~~ z3m@mPC@KBA|C1pXXAm=SWm2)<`wh{a=pqhuv*ql_j+cguR0p&5oAB9lol1AU8aGcM> zQIe>Dp&x)6lqY=?`z)t})Cw3-5y#U2Djxfvcc;$3qXKHFWV942;TYl-Rx-Vk($sDd zJ3b;HZokw%%1iwAx05))sVY_!f-w-KRad6~C|6(m)#dJTetNpdW zB;l3nxvkc*0;*vM0MzbWa;=*`)`@db9tk6=shet26qOBg4K8UU~OXPogPiL(?F zb)g2-1XvEB+EMrhw*(W?$KJxrjF&K} z-k@|Veo<1QG+NW7C5<`&dc%nC!q)b)HHVD?>^z5&D%jWg7idq((JTMa)rwXyT~g{# zZvs&Ceq~{nkiRIJ7c${T}RE#5I0fHYZAs3YcM6(Emlc!ztP4vgM?33@P$Y zK@47!oO;$?U@9Z$FLdSg6zQV=!bQ(8jTs*UQ@Xr6%uN;gpyr~-Q-hgOQ|J0L7>N;& ztS{$Mr4*bxGAAdlj9QWB;50nx`K>$gR${OASKU9_z90~&iZ%{V+4pW+camzrC+_rv zP&JYUs^Yu^s0>^Rc&XmMGo&7+f0CRfNhQD%FS-3*Cs`4&zF|(%K*f2D2MeEYD_skT zq?%j(lj6%#R2kX=sKhDGJWb)2_#QQ{?w?|5q*8SkqzZHg-do|XfK?o4lCveLblnfA z1X@ER72fsV2^1QbisL&#mHgg*MQ|R9W%d60r&!LBWEnpE3aNV8;c_?dIsB(vbMjzl zU|Bw$@sTWa*hMlz0#y*jKPk?YVmUrs%-=9m?R$mBK;g$kvL>O%eI1)WMgG>o5zVMW1MDxl?es!9XPnR}HnerBXNe@# zSAPVcoczU);*R&W{8}daL`;!b-z!6doGT97718kYTF0Q)tG$&@zrFrz4KD%x)%QKkikyjyr98F{MF z)4w#N1eU%-pIDev#@*`kr}uxg1gjOLJ8@qKrU$j2?s|*myJ&xFBVpQd4CP4*Z({7DD5>ZPvR{$ z<+Y)GEuIlYbI$`&3vY|LBad$jNWyXExtKXlbaFs70BUq7pp;_uzg9EV#su$3MosTE zdXCEf05U)i+|Refb#%-KwNZ5-G~(+;kyAL41 zYYc!IwwIQ>2Cn4A280G!8TCM~pB(TDGi??_hps~*t__wzSN8RV|Fy(xfNI@xwA7mI zr@2-5S~d4Guj{2C!@B@V%muj-3o|cD;>|gaBtT8TS^$Op8U9?V{`Yz=)S7G%p+tJ$ zOI*PlOKPSq@$z#}0`#UI04V(IsC%KmPP-pE*eF05br3*Z;ZBlW;kSgh5eWk{G(QcX zs<177ldB7vf4aYfmm>-FJQs4(L}j2S#}5?7{<7bo4h>Kp$$P?S$Jc6YIDI4qWmw)5 zPGA3Gt5cx?sx&&`WG9=7al}a()y_hG{7DiT5$8SORQ`yQFhGOuyeFI<*=sS>O$r*udMB)(Dd^=3VuX!0!4kGUM0tR`r>N%caqnS31%SM# zsCwCHDt>)Q3KaqJ9-{goZl|gE$t4jQ9OpenRoPOV(?Qeu~YssOn@CFYXIR{mDH)DDfP29fue)hA3@RJM0ZqB=m{V^)9s>{7yj8UTek 
z;iSUy{&#Y){b?bmkEBo&gFlBAkIZ9jR?XPmn_Z3?Q42%fgIJF)bdmT-614&Hp2OP3 zY9z4?4Y&p%?>Vex)1AV{QqUx}iN3C&hZp0^JqlaV#7n{m4Sn;T)M_=_N$in?2E6Tf zQMDvxn5!w=MLTf?3I=Gv`yhboaw)d`72Za#%aTs8R|?mv{}}|K&Z6x%Zuci8**b~P z2#s+wJa zQ%Qx}N4PD{PkDxoXXgW`H9o+yhr%vN?kp9J`_RvTl2Vg+D_z;hb^*9jw0&ha{+UUq z!;>Np|ww1jWE0ipnm;fx><#vv1H7N|ImP&F>LU_ z$Nsl5PtygKLg8~MqycR5z--2IQs_Vu)02mv+xw?u*$Sai!R1IOOmp!K_DiBMhMgYR z*xms#08T@N1~dWK?SW*LB&AT4p1dlN_(BRzG3@b?C^^Xuo_+?1v(6S8aSOz!5bA8+ zVtA!6%6rf?4mp5?9q89Zsb+Y-@LJvJ)Ad5{6C1TakyP}Ix|I(Hp;R8~A8_TO80V!` zb3_H9;uY6m_ACF`RsOMC>9e)OZiEK2-+Ew~2fhR#;zJH5;-w4wHMJihHLj`76BRb+ zi6p|t1Q{CH0?$c{8~J|$B~FptKyqk}_mH&i!0@*bG-&?Y$mhs*7WP8}G@y-+b+6a9 zpi^iDpsICP3M~OH0#NIdoa|bmhZ#HR#SFL$$K`PdjVZUW%%d=%U@$hyJc2}0S(RvA z*|7EsP$l&rU$`Xyl~-c|vO&NFB_djbH}-CrVz4R$S2c8nwddhKAk;dWT0V z`^WF3H1PPL-$-;6qPno(Ol$4Qm;B?$xyNr3zyTx*PH1dbRNKRCb(#Q-%jeHo5Hy|}#x`A66D zsp-pFj$PUxEt|oigGaetUYi2%fzV`X3TyKUyEC}xN=$whpl0d_fVxT+%Pd^g)~!z% zZ~=~=0o3xk_+}xqXc5(JbGxEOsCLH2xp$bwbbwAaZ(b>pSuB+Emb+sukeOGZmre; z<lJ_K87eIuJ#h2`2gCui&HX1p zvL?M<3`ozGA-4WU`ysF+_Td5N$u)G&QTJ0+YIjae;F~?kQEu+(Z$2fpQ=d3Q4UQS~ zPJO(H8JZ9YJ(HRq{H0s;a*uiqGd#nvUWDqe8B{|0L(os=$`JdAF1JP;fzuLDiJO3% za~at(#D1USzE*2q6}LrrS?0+#??(0xG+6js_o}RI!b`qq%Mj~uqy24+(7Y->NAdEd z*Ie^{!!TNlk7q#R$tpa5;=Y`lWrIgm9Oe} zD|4?5u_m)^xjQtkib@;2inMw6uu`u`pVPf6SdWjV{3D$$OKb}rs}A*=gJV<@S8h$hlMZ;5O)|vxFJ%3-Hg9Zht(iRI+_~ni!j2l=V5n}^f=L!$#af2gh4j2i zE%Rz(^$uRWv3bXAbzj38rhBzg@(EtLifkETW53fTCco}8QRQFjBqyD8uDJ^oGU;Hr zZf21rQhcI&une(7S?KiMq)f2}HCKjM@6vAn@CDthm5g+}G%wjQ#Lf|EnwQYLTE*ysS9xsSeyv;{ zPt&~(@W%0~HpviMdZ%g=>wo5MhojvUV!Cd=3GYU{YHnqS-S>*iCi7~E z;#0h;isr4!S3gu0vvlvxcz?mGY?2|CzQSdbdE;rgFPZ9+%iIUaW7UWmx;Y(h1-wdX z8DiI!b=hRz#&~bUtH#H?ElyjR4_?&0P4M1{mrTr-A@=*FmWiQxo8s*1d2`4-$yax>m+8DhOYwLbtInztF=F?dzD%v-3r{qjICQ}^DAcNt!adA1C( z^3Ca1YqffTZoUooSGbkOGQ{dtu`VPu@9lUaFSs{0Zw=qv z&eFXta2CN!My!+}-zm$7(42SRtc6p>Lx$KpnG_GofAF&Iy%TpU+{z{yV)>tQ*<{|9 zczfejq|N)f8zcm?b+1+u2H_<+vSohD{&D#p^Ea%NN?^7LIV$A`KC*O^Gy??MQ 
zu`5_VQZ?3x9KyA7-&-y>D6umVKIu)Z|>{EsmN(}RM!Fw;>7*R3rYP_^9!D8Lp6>mqpBvrNyv2Q0@Qf<%C|>mph6{eEDDkdq-cfp(9oy4kQK-Qgy{kr%l##J;)9@*;RuDn0PN zfLDnuL#+RW)-r9<2Sq(JtS8u9FruDoSi^bN$_!oVFwWoUD)}L}6<$t;*lO3+?-fS}-NMVRw>cO7>+E>5X?IUbU7o#HK~8f5_1XOLoKhfPDt0-YsriA2_O9=5ldq z$mt^aVYqMLG^CIr_E;NhnsW?OoD6#e>@*k$2yzYkq@s;mGhdS$SfGpKzF?P4cY{zF zVpad(XO?JxauQr<+@o-H;J7i@xQ-0oZy<6j5^}moehjR&my;oOTym%-u+Xr6U{828 z!&1Fkwo@bhb(Q=$+*CM~SQ%o+nd4Hqb3Dhd&Is!dHrrvjhUKqsSmsJ6$B7PMPk=22 zQ-PHswx+1%`ZmKdNIAovRQZ1!PB~%R_kUU2m$@n-r;FqP$n6AElOjXx6sIj{Qnnf< zIm4a;+Y6@hZ`jpL5@|RyUr(s%BKc{sLq2k^Tlwb)lT@8H85c#)xM$#gfTJwu$`G47 z!HyOMZ%Abz-dwyKY{-@&)}y-Z>*VM|l*|hadlu}x!*UHPNlGwX%zV>fq`$6`2f-D} zat)shvGvR|X{&QQ$7v5?&w*V5MoQ!wmNv|mpn}!%K99E~UK-bI8DgdJ5|wTG5LKKE z8w^$+j0LmYxN(Px6a)OsHJ%G}VSj0)9-OMU46!WNj)vyVKqVcoQro=!@sir3-jWEZ z80aE-2-qE9Dp)ealCQL;FvqY|%718FCR}?s4I7Os+t{XAnQu#uxCgpO9t!pZ7%gVD z46$N`ZSatjz(T`@fjtkVf@Rn&CSI!H?*dc?p;^vuzwGAi(wp=q5rtj3O z2XyoXj|RF(o{Y6GsB+m1g(@qTg~ro&B-qegQ!r=3YFKQBDwVas;bb=^WOb1|73&O8 z<&PN}Bh>eRNJjRu{?BV0XhR4b0Gy1=vIy zB+$@2(?P!kRUI@#kr}Fkb{;F(B(W?${NTmRu!|9*#FQ%-;`x(j@aa5=-dISi)ibuF zlet;ZQq9vv@{3?aUv$$68DgEDaz9#P-k0!J#H)VZ49_q})_k7#%iNL>(na!2to1-q z&fXH2QW=!!;c^mOXj}l-299c*D?@AqSQCE7VXNebf1r!xSzv>~sv##sY$sS#)C}Vx zpA?)kZqd4h#XmH|96o#mp$5tfm(J7jgAu`Y zY0Txr2?%Gmz052JE;Uo(*)7JRfUG zSe0Hglmd07m%}R}7C_X2V0xNshHTbOwJw2Juv5fBJ~V?+Ej7bL))`b#RQ^J+rQ`d z4^;lFtKces@)FpQj?I=KR+Xc=bW5RmU&Wh+SLtuwG$syO{@JB_m*QQ8mqA#z8Ez)+ zwSJ;QLA8|G<8I-89Mj;hX_8F#_JIGKxis2L+qxft$yd|Lp-V)V%!^Wz2VfD z7tUlxzmPS4pwgK!mhvgbJj8~=FjQ(6|`u&GMSIHaUy2ELyT87xA$knCSbsTbr z<$w(aBS~@%8w5s@OxiCwwKuv*-UK!UjP;Ui8Ddo)vi10n3{&AXY_rB+3*hMMa*bOG zSDhyC3&~|5r;Fq*$gKfW?Uo@nmK3f{${9w{3=P`~wi`?tVb~!er$To?YG8pblDB~! 
z03-XeLU4LtgnKyx{+;$A5UuAfc%HuW~{m*bM1lM1xteD zn&IU~_0@1gM}8?;6*OHW@5TC?L$l4$gK4lww;RehG&IjApeZlsQ%2{S;Sj3v#!hzB zQOUvvx=8*MYhzeqkZp#A|B^LSt1jmG478V*HA5N7WlTCxezp)eMjS z9*^*2nD$~H8=Ho#xjx7GCM@kkt{DpVjV7fzbn-FDf(E)s-j6i|RnnRv+mhC)CeYA4 zUx1zmRmz$n-sL|@JNbA*Ru{C4^BKnT_hg{ecPegX1M0}f6Bo&Ln$&J zC+-OiIl?{h!`!42VU}Hw{F8Mfzm+~WO|}aqwl0#tH@hHoLhf>3*Ah8yE$)VEnt4StZwcYHVxqTz)ZG8i~q z?}*S0-}9l=T$^^~nqm5IO^Zz<^G6@&Ko`j;ur>r`%$IG3+i3P~bZCyDalN0pe!zU6 zmz5#bgwBK6cJPywe#AQnujU6b#QO4X*N{m15S55wKY>jJQ#)(e8AfAdd*;s`3v`kE zGuR@qRJH%I#HLd6GzCg