diff --git a/Package.resolved b/Package.resolved
index c3a7d4a7d..c60f57425 100644
--- a/Package.resolved
+++ b/Package.resolved
@@ -1,5 +1,24 @@
 {
+  "originHash" : "de9ffd5f38c1f245e5a3028d1c9ff43af00171e62802d1b0dfcb28c3486c7c54",
   "pins" : [
+    {
+      "identity" : "jxl-coder-swift",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/awxkee/jxl-coder-swift.git",
+      "state" : {
+        "revision" : "179264567c7dc0dd489859d5572773222358a7f5",
+        "version" : "1.7.3"
+      }
+    },
+    {
+      "identity" : "libwebp-ios",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/awxkee/libwebp-ios.git",
+      "state" : {
+        "revision" : "09ab26afc64c55a49332c396c4b465c278fcb05f",
+        "version" : "1.1.1"
+      }
+    },
     {
       "identity" : "swift-syntax",
       "kind" : "remoteSourceControl",
@@ -8,7 +27,16 @@
         "revision" : "4c6cc0a3b9e8f14b3ae2307c5ccae4de6167ac2c",
         "version" : "600.0.0-prerelease-2024-06-12"
       }
+    },
+    {
+      "identity" : "webp.swift",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/awxkee/webp.swift.git",
+      "state" : {
+        "revision" : "5ee6b41965c161b3adbe29f6c01a2b66b4944867",
+        "version" : "1.1.1"
+      }
     }
   ],
-  "version" : 2
+  "version" : 3
 }
diff --git a/Package.swift b/Package.swift
index 3ea312104..a96a8610a 100644
--- a/Package.swift
+++ b/Package.swift
@@ -1,4 +1,4 @@
-// swift-tools-version:5.7
+// swift-tools-version:5.10
 
 import PackageDescription
 
@@ -6,26 +6,56 @@ let package = Package(
   name: "swift-snapshot-testing",
   platforms: [
    .iOS(.v13),
-    .macOS(.v10_15),
+    .macOS(.v12),
    .tvOS(.v13),
-    .watchOS(.v6),
+    .watchOS(.v8),
   ],
   products: [
     .library(
       name: "SnapshotTesting",
       targets: ["SnapshotTesting"]
     ),
+    .library(
+      name: "JPEGXLImageSerializer",
+      targets: ["JPEGXLImageSerializer"]
+    ),
+    .library(
+      name: "WEBPImageSerializer",
+      targets: ["WEBPImageSerializer"]
+    ),
     .library(
       name: "InlineSnapshotTesting",
       targets: ["InlineSnapshotTesting"]
     ),
   ],
   dependencies: [
-    .package(url: "https://github.com/swiftlang/swift-syntax", "509.0.0"..<"601.0.0-prerelease")
+    .package(url: "https://github.com/swiftlang/swift-syntax", "509.0.0"..<"601.0.0-prerelease"),
+    .package(url: "https://github.com/awxkee/jxl-coder-swift.git", from: "1.7.3"),
+    .package(url: "https://github.com/awxkee/webp.swift.git", from: "1.1.1"),
   ],
   targets: [
     .target(
-      name: "SnapshotTesting"
+      name: "SnapshotTesting",
+      dependencies: [
+        "ImageSerializer"
+      ]
+    ),
+    .target(
+      name: "ImageSerializer"
+    ),
+    .target(
+      name: "JPEGXLImageSerializer",
+      dependencies: [
+        "ImageSerializer",
+        .product(name: "JxlCoder", package: "jxl-coder-swift")
+      ]
+    ),
+    .target(
+      name: "WEBPImageSerializer",
+      dependencies: [
+        "ImageSerializer",
+        .product(name: "webp", package: "webp.swift")
+      ]
     ),
     .target(
       name: "InlineSnapshotTesting",
diff --git a/Sources/ImageSerializer/ImageSerializer.swift b/Sources/ImageSerializer/ImageSerializer.swift
new file mode 100644
index 000000000..1f2c9282e
--- /dev/null
+++ b/Sources/ImageSerializer/ImageSerializer.swift
@@ -0,0 +1,54 @@
+import Foundation
+
+#if !os(macOS)
+import UIKit.UIImage
+/// A type alias for `UIImage` on iOS and `NSImage` on macOS.
+package typealias SnapImage = UIImage
+#else
+import AppKit.NSImage
+/// A type alias for `UIImage` on iOS and `NSImage` on macOS.
+package typealias SnapImage = NSImage
+#endif
+
+/// A structure responsible for encoding and decoding images.
+///
+/// The `ImageSerializer` structure provides two closures:
+/// - `encodeImage`: Encodes a `SnapImage` into `Data`.
+/// - `decodeImage`: Decodes `Data` back into a `SnapImage`.
+///
+/// These closures allow you to define custom image serialization logic for different image formats.
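+///
+/// For example, a serializer for a custom format can wrap any encoder/decoder pair
+/// (a sketch; `myEncode(_:)` and `myDecode(_:)` are hypothetical stand-ins):
+///
+/// ```swift
+/// let serializer = ImageSerializer(
+///   encodeImage: { image in myEncode(image) },
+///   decodeImage: { data in myDecode(data) }
+/// )
+/// ```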
+package struct ImageSerializer {
+  /// A closure that encodes a `SnapImage` into `Data`.
+  package var encodeImage: (_ image: SnapImage) -> Data?
+
+  /// A closure that decodes `Data` into a `SnapImage`.
+  package var decodeImage: (_ data: Data) -> SnapImage?
+
+  /// Initializes an `ImageSerializer` with custom encoding and decoding logic.
+  ///
+  /// - Parameters:
+  ///   - encodeImage: A closure that defines how to encode a `SnapImage` into `Data`.
+  ///   - decodeImage: A closure that defines how to decode `Data` into a `SnapImage`.
+  package init(encodeImage: @escaping (_: SnapImage) -> Data?, decodeImage: @escaping (_: Data) -> SnapImage?) {
+    self.encodeImage = encodeImage
+    self.decodeImage = decodeImage
+  }
+}
+
+/// An enumeration of supported image formats.
+///
+/// `ImageFormat` defines the formats that can be used for image serialization:
+/// - `.jxl`: JPEG XL format.
+/// - `.png`: PNG format.
+/// - `.heic`: HEIC format.
+/// - `.webp`: WebP format.
+///
+/// The `defaultValue` is set to `.png`.
+public enum ImageFormat: String {
+  case jxl
+  case png
+  case heic
+  case webp
+
+  public static var defaultValue = ImageFormat.png
+}
diff --git a/Sources/JPEGXLImageSerializer/JPEGXLImageSerializer.swift b/Sources/JPEGXLImageSerializer/JPEGXLImageSerializer.swift
new file mode 100644
index 000000000..6882f634d
--- /dev/null
+++ b/Sources/JPEGXLImageSerializer/JPEGXLImageSerializer.swift
@@ -0,0 +1,25 @@
+import Foundation
+import JxlCoder
+import ImageSerializer
+
+extension ImageSerializer {
+  /// A static property that provides an `ImageSerializer` for the JPEG XL format.
+  ///
+  /// This property uses `JXLCoder` to encode and decode images in the JPEG XL format.
+  ///
+  /// - Returns: An `ImageSerializer` instance configured for encoding and decoding JPEG XL images.
+  ///
+  /// - Encoding:
+  ///   - The `encodeImage` closure uses `JXLCoder.encode(image:)` to convert a `SnapImage` into `Data`.
+  /// - Decoding:
+  ///   - The `decodeImage` closure uses `JXLCoder.decode(data:)` to convert `Data` back into a `SnapImage`.
+  ///
+  /// - Note: The encoding and decoding operations are performed by the `JxlCoder` library, which supports the JPEG XL format.
+  package static var jxl: Self {
+    return ImageSerializer { image in
+      try? JXLCoder.encode(image: image)
+    } decodeImage: { data in
+      try? JXLCoder.decode(data: data)
+    }
+  }
+}
diff --git a/Sources/SnapshotTesting/AssertSnapshot.swift b/Sources/SnapshotTesting/AssertSnapshot.swift
index 24868e082..4d9345503 100644
--- a/Sources/SnapshotTesting/AssertSnapshot.swift
+++ b/Sources/SnapshotTesting/AssertSnapshot.swift
@@ -81,6 +81,9 @@
   return .missing
 }()
 
+/// The image format used to encode and decode snapshot references on disk. Defaults to `ImageFormat.defaultValue` (`.png`).
+public var imageFormat = ImageFormat.defaultValue
+
 /// Asserts that a given value matches a reference on disk.
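+///
+/// The reference format can be switched globally before asserting (a sketch; formats whose
+/// serializer module isn't available fall back to PNG):
+///
+/// ```swift
+/// imageFormat = .heic
+/// assertSnapshot(of: view, as: .image)
+/// ```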
 ///
 /// - Parameters:
diff --git a/Sources/SnapshotTesting/Snapshotting/CALayer.swift b/Sources/SnapshotTesting/Snapshotting/CALayer.swift
index 74c512c12..5b6512c6c 100644
--- a/Sources/SnapshotTesting/Snapshotting/CALayer.swift
+++ b/Sources/SnapshotTesting/Snapshotting/CALayer.swift
@@ -14,7 +14,7 @@
     ///   assertSnapshot(of: layer, as: .image(precision: 0.99))
     ///   ```
     public static var image: Snapshotting {
-      return .image(precision: 1)
+      return .image(precision: 1, format: imageFormat)
     }
 
     /// A snapshot strategy for comparing layers based on pixel equality.
@@ -25,9 +25,9 @@
     ///     match. 98-99% mimics
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
-    public static func image(precision: Float, perceptualPrecision: Float = 1) -> Snapshotting {
+    public static func image(precision: Float, perceptualPrecision: Float = 1, format: ImageFormat) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision
+        precision: precision, perceptualPrecision: perceptualPrecision, format: format
       ).pullback { layer in
         let image = NSImage(size: layer.bounds.size)
         image.lockFocus()
@@ -46,7 +46,7 @@
   extension Snapshotting where Value == CALayer, Format == UIImage {
     /// A snapshot strategy for comparing layers based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing layers based on pixel equality.
@@ -59,12 +59,12 @@
     ///     human eye.
     ///   - traits: A trait collection override.
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, traits: UITraitCollection = .init()
+      precision: Float = 1, perceptualPrecision: Float = 1, traits: UITraitCollection = .init(), format: ImageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale, format: format
       ).pullback { layer in
         renderer(bounds: layer.bounds, for: traits).image { ctx in
           layer.setNeedsLayout()
diff --git a/Sources/SnapshotTesting/Snapshotting/CGPath.swift b/Sources/SnapshotTesting/Snapshotting/CGPath.swift
index 65470605c..b64df6483 100644
--- a/Sources/SnapshotTesting/Snapshotting/CGPath.swift
+++ b/Sources/SnapshotTesting/Snapshotting/CGPath.swift
@@ -1,4 +1,5 @@
 #if os(macOS)
+  import AppKit
   import Cocoa
   import CoreGraphics
 
@@ -6,7 +7,7 @@
   extension Snapshotting where Value == CGPath, Format == NSImage {
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
@@ -29,10 +30,11 @@
     public static func image(
       precision: Float = 1, perceptualPrecision: Float = 1,
-      drawingMode: CGPathDrawingMode = .eoFill
+      drawingMode: CGPathDrawingMode = .eoFill,
+      format: ImageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision
+        precision: precision, perceptualPrecision: perceptualPrecision, format: format
       ).pullback { path in
         let bounds = path.boundingBoxOfPath
         var transform = CGAffineTransform(translationX: -bounds.origin.x, y: -bounds.origin.y)
@@ -52,10 +54,11 @@
 #elseif os(iOS) || os(tvOS)
   import UIKit
+
   extension Snapshotting where Value == CGPath, Format == UIImage {
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
@@ -68,10 +71,10 @@
     ///     human eye.
     public static func image(
       precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat = 1,
-      drawingMode: CGPathDrawingMode = .eoFill
+      drawingMode: CGPathDrawingMode = .eoFill, format: ImageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: scale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: scale, format: format
       ).pullback { path in
         let bounds = path.boundingBoxOfPath
         let format: UIGraphicsImageRendererFormat
diff --git a/Sources/SnapshotTesting/Snapshotting/HEICImageSerializer.swift b/Sources/SnapshotTesting/Snapshotting/HEICImageSerializer.swift
new file mode 100644
index 000000000..69bb8be10
--- /dev/null
+++ b/Sources/SnapshotTesting/Snapshotting/HEICImageSerializer.swift
@@ -0,0 +1,76 @@
+import Foundation
+import ImageIO
+import UniformTypeIdentifiers
+import ImageSerializer
+
+#if canImport(UIKit)
+import UIKit
+#endif
+#if canImport(AppKit)
+import AppKit
+#endif
+
+/// A struct that provides encoding and decoding functionality for HEIC images.
+///
+/// `HEICCoder` supports encoding images to HEIC format and decoding HEIC data back into images.
+///
+/// - Note: The HEIC format is only supported on iOS 14.0+ and macOS 11.0+.
+@available(iOS 14.0, macOS 11.0, tvOS 14.0, watchOS 7.0, *)
+struct HEICCoder {
+  /// Encodes a `SnapImage` into HEIC format.
+  ///
+  /// This method converts a `SnapImage` to `Data` using the HEIC format.
+  ///
+  /// - Parameter image: The image to be encoded. This can be a `UIImage` on iOS or an `NSImage` on macOS.
+  ///
+  /// - Returns: The encoded image data in HEIC format, or `nil` if encoding fails.
+  ///
+  /// - Note: The encoding quality is set to 0.8 (lossy compression). On macOS, the underlying `CGImage` is obtained via `cgImage(forProposedRect:context:hints:)`.
+  static func encodeImage(_ image: SnapImage) -> Data? {
+#if !os(macOS)
+    guard let cgImage = image.cgImage else { return nil }
+#else
+    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else { return nil }
+#endif
+
+    let data = NSMutableData()
+    guard let destination = CGImageDestinationCreateWithData(data, UTType.heic.identifier as CFString, 1, nil) else { return nil }
+    CGImageDestinationAddImage(destination, cgImage, [kCGImageDestinationLossyCompressionQuality: 0.8] as CFDictionary)
+    guard CGImageDestinationFinalize(destination) else { return nil }
+    return data as Data
+  }
+
+  /// Decodes HEIC image data into a `SnapImage`.
+  ///
+  /// This method converts HEIC image data back into a `SnapImage`.
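+  ///
+  /// A round-trip sketch (assumes `image` is a `SnapImage` in scope):
+  ///
+  /// ```swift
+  /// if let data = HEICCoder.encodeImage(image) {
+  ///   let restored = HEICCoder.decodeImage(data) // `UIImage` on iOS, `NSImage` on macOS
+  /// }
+  /// ```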
+ /// + /// - Parameter data: The HEIC data to be decoded. + /// + /// - Returns: The decoded image as `SnapImage`, or `nil` if decoding fails. + /// + /// - Note: On iOS, this returns a `UIImage`, while on macOS, it returns an `NSImage`. + static func decodeImage(_ data: Data) -> SnapImage? { +#if !os(macOS) + return UIImage(data: data) +#else + return NSImage(data: data) +#endif + } +} + +@available(iOS 14.0, macOS 11.0, tvOS 14.0, watchOS 7.0, *) +extension ImageSerializer { + /// A static property that provides an `ImageSerializer` configured for HEIC format. + /// + /// This property creates an `ImageSerializer` instance that uses `HEICCoder` to handle encoding and decoding of HEIC images. + /// + /// - Returns: An `ImageSerializer` instance configured for HEIC format. + /// + /// - Note: This property is available only on iOS 14.0 and later. + package static var heic: ImageSerializer { + ImageSerializer( + encodeImage: HEICCoder.encodeImage, + decodeImage: HEICCoder.decodeImage + ) + } +} diff --git a/Sources/SnapshotTesting/Snapshotting/ImageCoder.swift b/Sources/SnapshotTesting/Snapshotting/ImageCoder.swift new file mode 100644 index 000000000..d29ce5bac --- /dev/null +++ b/Sources/SnapshotTesting/Snapshotting/ImageCoder.swift @@ -0,0 +1,87 @@ +import Foundation +@_exported import ImageSerializer + +#if canImport(JPEGXLImageSerializer) +import JPEGXLImageSerializer +#endif + +#if canImport(WEBPImageSerializer) +import WEBPImageSerializer +#endif + +/// Encodes an image into the specified format. +/// +/// This function takes a `SnapImage` and encodes it into a `Data` representation using the specified `ImageFormat`. +/// +/// - Parameters: +/// - image: The image to be encoded. This can be a `UIImage` on iOS or an `NSImage` on macOS. +/// - format: The format to encode the image into. Supported formats are `.png`, `.heic`, and `.jxl`. +/// +/// - Returns: The encoded image as `Data`, or `nil` if encoding fails. +/// +/// - Note: +/// - If the `.heic` format is selected and the platform does not support HEIC (iOS 14.0+), the image will be encoded as PNG. +/// - If the `.jxl` format is selected but `JPEGXLImageSerializer` is not available, the image will be encoded as PNG. +package func EncodeImage(image: SnapImage, _ format: ImageFormat) -> Data? { + var serializer: ImageSerializer + switch format { +#if canImport(JPEGXLImageSerializer) + case .jxl: serializer = ImageSerializer.jxl +#else + case .jxl: serializer = ImageSerializer.png +#endif + case .png: serializer = ImageSerializer.png + case .webp: + serializer = ImageSerializer.png +#if canImport(WEBPImageSerializer) + if #available(iOS 13.0, macOS 10.10, tvOS 13.0, watchOS 6.0, *) { + serializer = ImageSerializer.webp + } +#endif + case .heic: + serializer = ImageSerializer.png + if #available(iOS 14.0, macOS 11.0, tvOS 14.0, watchOS 7.0, *) { + serializer = ImageSerializer.heic + } + } + return serializer.encodeImage(image) +} + +/// Decodes image data into a `SnapImage` of the specified format. +/// +/// This function takes `Data` representing an encoded image and decodes it back into a `SnapImage`. +/// +/// - Parameters: +/// - data: The data to be decoded into an image. +/// - format: The format of the image data. Supported formats are `.png`, `.heic`, and `.jxl`. +/// +/// - Returns: The decoded `SnapImage`, or `nil` if decoding fails. +/// +/// - Note: +/// - If the `.heic` format is selected and the platform does not support HEIC (iOS 14.0+), the image will be decoded as PNG. 
+package func DecodeImage(data: Data, _ format: ImageFormat) -> SnapImage? {
+  var serializer: ImageSerializer
+  switch format {
+#if canImport(JPEGXLImageSerializer)
+  case .jxl: serializer = ImageSerializer.jxl
+#else
+  case .jxl: serializer = ImageSerializer.png
+#endif
+  case .png: serializer = ImageSerializer.png
+  case .webp:
+    serializer = ImageSerializer.png
+#if canImport(WEBPImageSerializer)
+    if #available(iOS 13.0, macOS 10.10, tvOS 13.0, watchOS 6.0, *) {
+      serializer = ImageSerializer.webp
+    }
+#endif
+  case .heic:
+    if #available(iOS 14.0, macOS 11.0, tvOS 14.0, watchOS 7.0, *) {
+      serializer = ImageSerializer.heic
+    } else {
+      serializer = ImageSerializer.png
+    }
+  }
+  return serializer.decodeImage(data)
+}
diff --git a/Sources/SnapshotTesting/Snapshotting/NSBezierPath.swift b/Sources/SnapshotTesting/Snapshotting/NSBezierPath.swift
index b84a59bf3..67699b759 100644
--- a/Sources/SnapshotTesting/Snapshotting/NSBezierPath.swift
+++ b/Sources/SnapshotTesting/Snapshotting/NSBezierPath.swift
@@ -5,7 +5,7 @@
   extension Snapshotting where Value == NSBezierPath, Format == NSImage {
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
@@ -24,9 +24,9 @@
     ///     match. 98-99% mimics
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1) -> Snapshotting {
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, format: ImageFormat) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision
+        precision: precision, perceptualPrecision: perceptualPrecision, format: format
       ).pullback { path in
         // Move path info frame:
         let bounds = path.bounds
diff --git a/Sources/SnapshotTesting/Snapshotting/NSImage.swift b/Sources/SnapshotTesting/Snapshotting/NSImage.swift
index be4fd7cd4..0e12da0ac 100644
--- a/Sources/SnapshotTesting/Snapshotting/NSImage.swift
+++ b/Sources/SnapshotTesting/Snapshotting/NSImage.swift
@@ -4,7 +4,7 @@
 
   extension Diffing where Value == NSImage {
     /// A pixel-diffing strategy for NSImage's which requires a 100% match.
-    public static let image = Diffing.image()
+    public static let image = Diffing.image(format: imageFormat)
 
     /// A pixel-diffing strategy for NSImage that allows customizing how precise the matching must be.
     ///
@@ -15,14 +15,14 @@
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
     /// - Returns: A new diffing strategy.
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1) -> Diffing {
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, format: ImageFormat) -> Diffing {
       return .init(
-        toData: { NSImagePNGRepresentation($0)! },
-        fromData: { NSImage(data: $0)! }
+        toData: { EncodeImage(image: $0, format)! },
+        fromData: { DecodeImage(data: $0, format)! }
       ) { old, new in
         guard
           let message = compare(
-            old, new, precision: precision, perceptualPrecision: perceptualPrecision)
+            old, new, precision: precision, perceptualPrecision: perceptualPrecision, format: format)
         else { return nil }
         let difference = SnapshotTesting.diff(old, new)
         let oldAttachment = XCTAttachment(image: old)
@@ -42,7 +42,7 @@
   extension Snapshotting where Value == NSImage, Format == NSImage {
     /// A snapshot strategy for comparing images based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing images based on pixel equality.
@@ -53,24 +53,15 @@
     ///     match. 98-99% mimics
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1) -> Snapshotting {
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, format: ImageFormat) -> Snapshotting {
       return .init(
-        pathExtension: "png",
-        diffing: .image(precision: precision, perceptualPrecision: perceptualPrecision)
+        pathExtension: format.rawValue,
+        diffing: .image(precision: precision, perceptualPrecision: perceptualPrecision, format: format)
       )
     }
   }
 
-  private func NSImagePNGRepresentation(_ image: NSImage) -> Data? {
-    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
-      return nil
-    }
-    let rep = NSBitmapImageRep(cgImage: cgImage)
-    rep.size = image.size
-    return rep.representation(using: .png, properties: [:])
-  }
-
-  private func compare(_ old: NSImage, _ new: NSImage, precision: Float, perceptualPrecision: Float)
+  private func compare(_ old: NSImage, _ new: NSImage, precision: Float, perceptualPrecision: Float, format: ImageFormat)
     -> String?
   {
     guard let oldCgImage = old.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
@@ -94,8 +85,8 @@
     let byteCount = oldContext.height * oldContext.bytesPerRow
     if memcmp(oldData, newData, byteCount) == 0 { return nil }
     guard
-      let pngData = NSImagePNGRepresentation(new),
-      let newerCgImage = NSImage(data: pngData)?.cgImage(
+      let imageData = EncodeImage(image: new, format),
+      let newerCgImage = NSImage(data: imageData)?.cgImage(
         forProposedRect: nil, context: nil, hints: nil),
       let newerContext = context(for: newerCgImage),
       let newerData = newerContext.data
diff --git a/Sources/SnapshotTesting/Snapshotting/NSView.swift b/Sources/SnapshotTesting/Snapshotting/NSView.swift
index b2e7edfb0..9db6d47b0 100644
--- a/Sources/SnapshotTesting/Snapshotting/NSView.swift
+++ b/Sources/SnapshotTesting/Snapshotting/NSView.swift
@@ -5,7 +5,7 @@
   extension Snapshotting where Value == NSView, Format == NSImage {
     /// A snapshot strategy for comparing views based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing views based on pixel equality.
@@ -21,10 +21,10 @@
     ///     human eye.
     ///   - size: A view size override.
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize? = nil
+      precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize? = nil, format: ImageFormat = imageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision
+        precision: precision, perceptualPrecision: perceptualPrecision, format: format
       ).asyncPullback { view in
         let initialSize = view.frame.size
         if let size = size { view.frame.size = size }
diff --git a/Sources/SnapshotTesting/Snapshotting/NSViewController.swift b/Sources/SnapshotTesting/Snapshotting/NSViewController.swift
index 69ec72dde..2ee17a0b4 100644
--- a/Sources/SnapshotTesting/Snapshotting/NSViewController.swift
+++ b/Sources/SnapshotTesting/Snapshotting/NSViewController.swift
@@ -5,7 +5,7 @@
   extension Snapshotting where Value == NSViewController, Format == NSImage {
     /// A snapshot strategy for comparing view controller views based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing view controller views based on pixel equality.
@@ -18,10 +18,10 @@
     ///     human eye.
     ///   - size: A view size override.
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize? = nil
+      precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize? = nil, format: ImageFormat
     ) -> Snapshotting {
       return Snapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, size: size
+        precision: precision, perceptualPrecision: perceptualPrecision, size: size, format: format
       ).pullback { $0.view }
     }
   }
diff --git a/Sources/SnapshotTesting/Snapshotting/PNGImageSerializer.swift b/Sources/SnapshotTesting/Snapshotting/PNGImageSerializer.swift
new file mode 100644
index 000000000..c0669dab1
--- /dev/null
+++ b/Sources/SnapshotTesting/Snapshotting/PNGImageSerializer.swift
@@ -0,0 +1,40 @@
+import Foundation
+import ImageSerializer
+
+#if canImport(UIKit)
+import UIKit
+#endif
+#if canImport(AppKit)
+import AppKit
+#endif
+
+struct PNGCoder {
+  static func encodeImage(_ image: SnapImage) -> Data? {
+#if !os(macOS)
+    return image.pngData()
+#else
+    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
+      return nil
+    }
+    let bitmapRep = NSBitmapImageRep(cgImage: cgImage)
+    return bitmapRep.representation(using: .png, properties: [:])
+#endif
+  }
+
+  static func decodeImage(_ data: Data) -> SnapImage? {
+#if !os(macOS)
+    return UIImage(data: data)
+#else
+    return NSImage(data: data)
+#endif
+  }
+}
+
+extension ImageSerializer {
+  package static var png: Self {
+    ImageSerializer(
+      encodeImage: PNGCoder.encodeImage,
+      decodeImage: PNGCoder.decodeImage
+    )
+  }
+}
diff --git a/Sources/SnapshotTesting/Snapshotting/SceneKit.swift b/Sources/SnapshotTesting/Snapshotting/SceneKit.swift
index 94ff90459..c205b0477 100644
--- a/Sources/SnapshotTesting/Snapshotting/SceneKit.swift
+++ b/Sources/SnapshotTesting/Snapshotting/SceneKit.swift
@@ -17,10 +17,10 @@
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
     ///   - size: The size of the scene.
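+    ///
+    /// For example (a sketch; the size and format here are arbitrary):
+    ///
+    /// ```swift
+    /// assertSnapshot(
+    ///   of: scene,
+    ///   as: .image(size: .init(width: 500, height: 500), format: .png)
+    /// )
+    /// ```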
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize)
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize, format: ImageFormat)
       -> Snapshotting
     {
-      return .scnScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size)
+      return .scnScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size, format: format)
     }
   }
 #elseif os(iOS) || os(tvOS)
@@ -34,20 +34,20 @@
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
     ///   - size: The size of the scene.
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize)
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize, format: ImageFormat)
       -> Snapshotting
     {
-      return .scnScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size)
+      return .scnScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size, format: format)
     }
   }
 #endif
 
 extension Snapshotting where Value == SCNScene, Format == Image {
-  fileprivate static func scnScene(precision: Float, perceptualPrecision: Float, size: CGSize)
+  fileprivate static func scnScene(precision: Float, perceptualPrecision: Float, size: CGSize, format: ImageFormat)
     -> Snapshotting
   {
     return Snapshotting.image(
-      precision: precision, perceptualPrecision: perceptualPrecision
+      precision: precision, perceptualPrecision: perceptualPrecision, format: format
     ).pullback { scene in
       let view = SCNView(frame: .init(x: 0, y: 0, width: size.width, height: size.height))
       view.scene = scene
diff --git a/Sources/SnapshotTesting/Snapshotting/SpriteKit.swift b/Sources/SnapshotTesting/Snapshotting/SpriteKit.swift
index ad515050a..5d3ea0ccb 100644
--- a/Sources/SnapshotTesting/Snapshotting/SpriteKit.swift
+++ b/Sources/SnapshotTesting/Snapshotting/SpriteKit.swift
@@ -17,10 +17,10 @@
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
     ///   - size: The size of the scene.
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize)
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize, format: ImageFormat)
       -> Snapshotting
     {
-      return .skScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size)
+      return .skScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size, format: format)
     }
   }
 #elseif os(iOS) || os(tvOS)
@@ -34,20 +34,20 @@
     ///     [the precision](http://zschuessler.github.io/DeltaE/learn/#toc-defining-delta-e) of the
     ///     human eye.
     ///   - size: The size of the scene.
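+    ///
+    /// For example (a sketch; `.webp` falls back to PNG when the WebP serializer isn't available):
+    ///
+    /// ```swift
+    /// assertSnapshot(
+    ///   of: scene,
+    ///   as: .image(size: .init(width: 300, height: 300), format: .webp)
+    /// )
+    /// ```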
-    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize)
+    public static func image(precision: Float = 1, perceptualPrecision: Float = 1, size: CGSize, format: ImageFormat)
       -> Snapshotting
     {
-      return .skScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size)
+      return .skScene(precision: precision, perceptualPrecision: perceptualPrecision, size: size, format: format)
     }
   }
 #endif
 
 extension Snapshotting where Value == SKScene, Format == Image {
-  fileprivate static func skScene(precision: Float, perceptualPrecision: Float, size: CGSize)
+  fileprivate static func skScene(precision: Float, perceptualPrecision: Float, size: CGSize, format: ImageFormat)
     -> Snapshotting
   {
     return Snapshotting.image(
-      precision: precision, perceptualPrecision: perceptualPrecision
+      precision: precision, perceptualPrecision: perceptualPrecision, format: format
     ).pullback { scene in
       let view = SKView(frame: .init(x: 0, y: 0, width: size.width, height: size.height))
       view.presentScene(scene)
diff --git a/Sources/SnapshotTesting/Snapshotting/SwiftUIView.swift b/Sources/SnapshotTesting/Snapshotting/SwiftUIView.swift
index 8d85e1f0b..43e4321f1 100644
--- a/Sources/SnapshotTesting/Snapshotting/SwiftUIView.swift
+++ b/Sources/SnapshotTesting/Snapshotting/SwiftUIView.swift
@@ -20,7 +20,7 @@
 
     /// A snapshot strategy for comparing SwiftUI Views based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing SwiftUI Views based on pixel equality.
@@ -41,7 +41,8 @@
       precision: Float = 1,
       perceptualPrecision: Float = 1,
       layout: SwiftUISnapshotLayout = .sizeThatFits,
-      traits: UITraitCollection = .init()
+      traits: UITraitCollection = .init(),
+      format: ImageFormat = imageFormat
     ) -> Snapshotting {
@@ -60,10 +61,9 @@
       }
 
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale, format: format
       ).asyncPullback { view in
         var config = config
-
         let controller: UIViewController
 
         if config.size != nil {
diff --git a/Sources/SnapshotTesting/Snapshotting/UIBezierPath.swift b/Sources/SnapshotTesting/Snapshotting/UIBezierPath.swift
index 6b48d622d..057d7ecde 100644
--- a/Sources/SnapshotTesting/Snapshotting/UIBezierPath.swift
+++ b/Sources/SnapshotTesting/Snapshotting/UIBezierPath.swift
@@ -4,7 +4,7 @@
   extension Snapshotting where Value == UIBezierPath, Format == UIImage {
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
    public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing bezier paths based on pixel equality.
@@ -17,10 +17,10 @@
     ///     human eye.
     ///   - scale: The scale to use when loading the reference image from disk.
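+    ///
+    /// For example (a sketch; compares at 2x scale and stores the reference as HEIC):
+    ///
+    /// ```swift
+    /// assertSnapshot(of: path, as: .image(scale: 2, format: .heic))
+    /// ```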
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat = 1
+      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat = 1, format imgFormat: ImageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: scale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: scale, format: imgFormat
       ).pullback { path in
         let bounds = path.bounds
         let format: UIGraphicsImageRendererFormat
diff --git a/Sources/SnapshotTesting/Snapshotting/UIImage.swift b/Sources/SnapshotTesting/Snapshotting/UIImage.swift
index 3d1bb5319..ea4cc253c 100644
--- a/Sources/SnapshotTesting/Snapshotting/UIImage.swift
+++ b/Sources/SnapshotTesting/Snapshotting/UIImage.swift
@@ -4,7 +4,7 @@
 
   extension Diffing where Value == UIImage {
     /// A pixel-diffing strategy for UIImage's which requires a 100% match.
-    public static let image = Diffing.image()
+    public static let image = Diffing.image(format: imageFormat)
 
     /// A pixel-diffing strategy for UIImage that allows customizing how precise the matching must be.
     ///
@@ -18,7 +18,7 @@
     ///     `UITraitCollection`s default value of `0.0`, the screens scale is used.
     /// - Returns: A new diffing strategy.
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat? = nil
+      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat? = nil, format: ImageFormat
     ) -> Diffing {
       let imageScale: CGFloat
       if let scale = scale, scale != 0.0 {
@@ -28,12 +28,12 @@
       }
 
       return Diffing(
-        toData: { $0.pngData() ?? emptyImage().pngData()! },
-        fromData: { UIImage(data: $0, scale: imageScale)! }
+        toData: { EncodeImage(image: $0, format)! },
+        fromData: { DecodeImage(data: $0, format)! }
       ) { old, new in
         guard
           let message = compare(
-            old, new, precision: precision, perceptualPrecision: perceptualPrecision)
+            old, new, precision: precision, perceptualPrecision: perceptualPrecision, format: format)
         else { return nil }
         let difference = SnapshotTesting.diff(old, new)
         let oldAttachment = XCTAttachment(image: old)
@@ -65,7 +65,7 @@
   extension Snapshotting where Value == UIImage, Format == UIImage {
     /// A snapshot strategy for comparing images based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing images based on pixel equality.
@@ -78,12 +78,12 @@
     ///     human eye.
     ///   - scale: The scale of the reference image stored on disk.
     public static func image(
-      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat? = nil
+      precision: Float = 1, perceptualPrecision: Float = 1, scale: CGFloat? = nil, format: ImageFormat
     ) -> Snapshotting {
       return .init(
-        pathExtension: "png",
+        pathExtension: format.rawValue,
         diffing: .image(
-          precision: precision, perceptualPrecision: perceptualPrecision, scale: scale)
+          precision: precision, perceptualPrecision: perceptualPrecision, scale: scale, format: format)
       )
     }
   }
@@ -93,7 +93,7 @@
   private let imageContextBitsPerComponent = 8
   private let imageContextBytesPerPixel = 4
 
-  private func compare(_ old: UIImage, _ new: UIImage, precision: Float, perceptualPrecision: Float)
+  private func compare(_ old: UIImage, _ new: UIImage, precision: Float, perceptualPrecision: Float, format: ImageFormat)
     -> String?
   {
     guard let oldCgImage = old.cgImage else {
@@ -119,8 +119,8 @@
     }
     var newerBytes = [UInt8](repeating: 0, count: byteCount)
     guard
-      let pngData = new.pngData(),
-      let newerCgImage = UIImage(data: pngData)?.cgImage,
+      let imgData = EncodeImage(image: new, format),
+      let newerCgImage = UIImage(data: imgData)?.cgImage,
       let newerContext = context(for: newerCgImage, data: &newerBytes),
       let newerData = newerContext.data
     else {
diff --git a/Sources/SnapshotTesting/Snapshotting/UIView.swift b/Sources/SnapshotTesting/Snapshotting/UIView.swift
index 7244f67d1..d7530e2e2 100644
--- a/Sources/SnapshotTesting/Snapshotting/UIView.swift
+++ b/Sources/SnapshotTesting/Snapshotting/UIView.swift
@@ -4,7 +4,7 @@
   extension Snapshotting where Value == UIView, Format == UIImage {
     /// A snapshot strategy for comparing views based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing views based on pixel equality.
@@ -25,13 +25,14 @@
       precision: Float = 1,
       perceptualPrecision: Float = 1,
       size: CGSize? = nil,
-      traits: UITraitCollection = .init()
+      traits: UITraitCollection = .init(),
+      format: ImageFormat = imageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale, format: format
       ).asyncPullback { view in
         snapshotView(
           config: .init(safeArea: .zero, size: size ?? view.frame.size, traits: .init()),
diff --git a/Sources/SnapshotTesting/Snapshotting/UIViewController.swift b/Sources/SnapshotTesting/Snapshotting/UIViewController.swift
index b08b8bf59..0c80a27e3 100644
--- a/Sources/SnapshotTesting/Snapshotting/UIViewController.swift
+++ b/Sources/SnapshotTesting/Snapshotting/UIViewController.swift
@@ -4,7 +4,7 @@
   extension Snapshotting where Value == UIViewController, Format == UIImage {
     /// A snapshot strategy for comparing view controller views based on pixel equality.
     public static var image: Snapshotting {
-      return .image()
+      return .image(format: imageFormat)
     }
 
     /// A snapshot strategy for comparing view controller views based on pixel equality.
@@ -23,13 +23,14 @@
       precision: Float = 1,
       perceptualPrecision: Float = 1,
       size: CGSize? = nil,
-      traits: UITraitCollection = .init()
+      traits: UITraitCollection = .init(),
+      format: ImageFormat = imageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale, format: format
       ).asyncPullback { viewController in
         snapshotView(
           config: size.map { .init(safeArea: config.safeArea, size: $0, traits: config.traits) }
@@ -60,13 +61,14 @@
       precision: Float = 1,
       perceptualPrecision: Float = 1,
       size: CGSize? = nil,
-      traits: UITraitCollection = .init()
+      traits: UITraitCollection = .init(),
+      format: ImageFormat
     ) -> Snapshotting {
       return SimplySnapshotting.image(
-        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale
+        precision: precision, perceptualPrecision: perceptualPrecision, scale: traits.displayScale, format: format
       ).asyncPullback { viewController in
         snapshotView(
           config: .init(safeArea: .zero, size: size, traits: traits),
diff --git a/Sources/WEBPImageSerializer/WEBPImageSerialize.swift b/Sources/WEBPImageSerializer/WEBPImageSerialize.swift
new file mode 100644
index 000000000..30358240a
--- /dev/null
+++ b/Sources/WEBPImageSerializer/WEBPImageSerialize.swift
@@ -0,0 +1,26 @@
+import Foundation
+import webp
+import ImageSerializer
+
+extension ImageSerializer {
+  /// A static property that provides an `ImageSerializer` for the WebP format.
+  ///
+  /// This property uses `WebPEncoder` and `WebPDecoder` to encode and decode images in the WebP format.
+  ///
+  /// - Returns: An `ImageSerializer` instance configured for encoding and decoding WebP images.
+  ///
+  /// - Encoding:
+  ///   - The `encodeImage` closure uses `WebPEncoder.encode(_:config:)` to convert a `SnapImage` into `Data` with the specified encoding configuration.
+  ///   - The configuration used is `.preset(.picture, quality: 80)`, which applies a preset for general picture quality.
+  /// - Decoding:
+  ///   - The `decodeImage` closure uses `WebPDecoder.decode(toImage:options:)` to convert `Data` back into a `SnapImage` with the specified decoding options.
+  ///
+  /// - Note: The encoding and decoding operations are performed by the `webp` library, which supports the WebP format.
+  package static var webp: Self {
+    return ImageSerializer { image in
+      try? WebPEncoder().encode(image, config: .preset(.picture, quality: 80))
+    } decodeImage: { data in
+      try? WebPDecoder().decode(toImage: data, options: WebpDecoderOptions())
+    }
+  }
+}
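+
+// Example (a sketch): opting snapshot references into WebP globally, assuming the
+// WEBPImageSerializer product is linked so the `canImport(WEBPImageSerializer)`
+// path in ImageCoder.swift is compiled in; otherwise `.webp` falls back to PNG:
+//
+//   imageFormat = .webp
+//   assertSnapshot(of: view, as: .image)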