import math

from helper import unittest, PillowTestCase, hopper

from PIL import Image


class TestImageTransform(PillowTestCase):

    def test_sanity(self):
        from PIL import ImageTransform

        im = Image.new("L", (100, 100))

        seq = tuple(range(10))

        transform = ImageTransform.AffineTransform(seq[:6])
        im.transform((100, 100), transform)
        transform = ImageTransform.ExtentTransform(seq[:4])
        im.transform((100, 100), transform)
        transform = ImageTransform.QuadTransform(seq[:8])
        im.transform((100, 100), transform)
        transform = ImageTransform.MeshTransform([(seq[:4], seq[:8])])
        im.transform((100, 100), transform)

    def test_extent(self):
        im = hopper('RGB')
        (w, h) = im.size
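        # Note: Image.EXTENT data is (x0, y0, x1, y1), the input rectangle
        # that gets stretched to fill the whole output image.  Selecting the
        # upper-left quarter should therefore match a 2x upscale cropped
        # back to the original size, which is what the comparison below does.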
        transformed = im.transform(im.size, Image.EXTENT,
                                   (0, 0,
                                    w//2, h//2),  # ul -> lr
                                   Image.BILINEAR)

        scaled = im.resize((w*2, h*2), Image.BILINEAR).crop((0, 0, w, h))

        # undone -- precision?
        self.assert_image_similar(transformed, scaled, 23)

    def test_quad(self):
        # one simple quad transform, equivalent to scale & crop upper left quad
        im = hopper('RGB')
        (w, h) = im.size
        transformed = im.transform(im.size, Image.QUAD,
                                   (0, 0, 0, h//2,
                                    # ul -> ccw around quad:
                                    w//2, h//2, w//2, 0),
                                   Image.BILINEAR)
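        # Note: Image.AFFINE data (a, b, c, d, e, f) maps each output pixel
        # (x, y) to the input position (a*x + b*y + c, d*x + e*y + f), so
        # (.5, 0, 0, 0, .5, 0) samples the input at half coordinates -- a 2x
        # zoom of the upper-left quadrant, the same result as the QUAD above.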
        scaled = im.transform((w, h), Image.AFFINE,
                              (.5, 0, 0, 0, .5, 0),
                              Image.BILINEAR)

        self.assert_image_equal(transformed, scaled)

    def test_mesh(self):
        # this should be a checkerboard of halfsized hoppers in ul, lr
        im = hopper('RGBA')
        (w, h) = im.size
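        # Note: Image.MESH data is a list of (bbox, quad) pairs; each source
        # quad (ul, ll, lr, ur) is mapped onto its destination bbox like an
        # independent QUAD transform, which is what builds the checkerboard.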
        transformed = im.transform(im.size, Image.MESH,
                                   [((0, 0, w//2, h//2),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0)),  # ul -> ccw around quad
                                    ((w//2, h//2, w, h),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0))],  # ul -> ccw around quad
                                   Image.BILINEAR)

        scaled = im.transform((w//2, h//2), Image.AFFINE,
                              (2, 0, 0, 0, 2, 0),
                              Image.BILINEAR)

        checker = Image.new('RGBA', im.size)
        checker.paste(scaled, (0, 0))
        checker.paste(scaled, (w//2, h//2))

        self.assert_image_equal(transformed, checker)

        # now, check to see that the extra area is (0, 0, 0, 0)
        blank = Image.new('RGBA', (w//2, h//2), (0, 0, 0, 0))

        self.assert_image_equal(blank, transformed.crop((w//2, 0, w, h//2)))
        self.assert_image_equal(blank, transformed.crop((0, h//2, w//2, h)))

    def _test_alpha_premult(self, op):
        # create image with half white, half black,
        # with the black half transparent.
        # do op,
        # there should be no darkness in the white section.
        im = Image.new('RGBA', (10, 10), (0, 0, 0, 0))
        im2 = Image.new('RGBA', (5, 10), (255, 255, 255, 255))
        im.paste(im2, (0, 0))

        im = op(im, (40, 10))

        im_background = Image.new('RGB', (40, 10), (255, 255, 255))
        im_background.paste(im, (0, 0), im)
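        # Note: histogram() returns 256 counts per band, concatenated, so
        # hist[-1] is the number of pixels whose last (blue) band is 255.
        # If alpha is premultiplied correctly during resampling, the
        # transparent black pixels never darken the white area, and all
        # 40*10 pixels of the composite should stay at 255.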
        hist = im_background.histogram()
        self.assertEqual(40*10, hist[-1])

    def test_alpha_premult_resize(self):

        def op(im, sz):
            return im.resize(sz, Image.BILINEAR)

        self._test_alpha_premult(op)

    def test_alpha_premult_transform(self):

        def op(im, sz):
            (w, h) = im.size
            return im.transform(sz, Image.EXTENT,
                                (0, 0,
                                 w, h),
                                Image.BILINEAR)

        self._test_alpha_premult(op)

    def test_blank_fill(self):
        # attempting to hit the issue reported in
        # https://github.com/python-pillow/Pillow/issues/254
        #
        # issue is that transforms with transparent overflow area
        # contained junk from previous images, especially on systems with
        # constrained memory. So, attempt to fill up memory with a
        # pattern, free it, and then run the mesh test again. Using a 1Mp
        # image with 4 bands, for 4 megs of data allocated, x 64. On my
        # machine (a 64 bit 12.04 VM with 512 megs available), this fails
        # with Pillow < a0eaf06cc5f62a6fb6de556989ac1014ff3348ea
        #
        # Running by default, but I'd totally understand not doing it in
        # the future

        pattern = [
            Image.new('RGBA', (1024, 1024), (a, a, a, a))
            for a in range(1, 65)
        ]

        # Yeah. Watch some JIT optimize this out.
        pattern = None
        self.test_mesh()

    def test_missing_method_data(self):
        self.assertRaises(ValueError, lambda:
                          hopper().transform((100, 100), None))


class TestImageTransformAffine(PillowTestCase):
    transform = Image.AFFINE

    def _test_image(self):
        im = hopper('RGB')
        return im.crop((10, 20, im.width - 10, im.height - 20))

    def _test_rotate(self, deg, transpose):
        im = self._test_image()

        angle = - math.radians(deg)
        matrix = [
            round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
            round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0,
            0, 0]
        matrix[2] = (1 - matrix[0] - matrix[1]) * im.width / 2
        matrix[5] = (1 - matrix[3] - matrix[4]) * im.height / 2
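        # Note: since the affine data maps output pixels back to input
        # positions, matrix[2] and matrix[5] are the offsets that send the
        # output centre back to the input centre, so these axis-aligned
        # rotations happen about the middle of the image rather than the
        # origin.  The trailing 0, 0 are the perspective terms (g, h); they
        # are unused by AFFINE but let the same matrices drive the
        # PERSPECTIVE subclass at the bottom of the file.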
        if transpose is not None:
            transposed = im.transpose(transpose)
        else:
            transposed = im

        for resample in [Image.NEAREST, Image.BILINEAR, Image.BICUBIC]:
            transformed = im.transform(transposed.size, self.transform,
                                       matrix, resample)
            self.assert_image_equal(transposed, transformed)

    def test_rotate_0_deg(self):
        self._test_rotate(0, None)

    def test_rotate_90_deg(self):
        self._test_rotate(90, Image.ROTATE_90)

    def test_rotate_180_deg(self):
        self._test_rotate(180, Image.ROTATE_180)

    def test_rotate_270_deg(self):
        self._test_rotate(270, Image.ROTATE_270)

    def _test_resize(self, scale, epsilonscale):
        im = self._test_image()

        size_up = int(round(im.width * scale)), int(round(im.height * scale))
        matrix_up = [
            1 / scale, 0, 0,
            0, 1 / scale, 0,
            0, 0]
        matrix_down = [
            scale, 0, 0,
            0, scale, 0,
            0, 0]
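        # Note: because the matrix maps output back to input coordinates,
        # scaling the image up by `scale` needs 1/scale on the diagonal and
        # scaling back down needs `scale`.  The round trip should reproduce
        # the original to within a tolerance that depends on the resampling
        # filter (epsilon) and on how far the image was scaled (epsilonscale).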
        for resample, epsilon in [(Image.NEAREST, 0),
                                  (Image.BILINEAR, 2), (Image.BICUBIC, 1)]:
            transformed = im.transform(
                size_up, self.transform, matrix_up, resample)
            transformed = transformed.transform(
                im.size, self.transform, matrix_down, resample)
            self.assert_image_similar(transformed, im, epsilon * epsilonscale)

    def test_resize_1_1x(self):
        self._test_resize(1.1, 6.9)

    def test_resize_1_5x(self):
        self._test_resize(1.5, 5.5)

    def test_resize_2_0x(self):
        self._test_resize(2.0, 5.5)

    def test_resize_2_3x(self):
        self._test_resize(2.3, 3.7)

    def test_resize_2_5x(self):
        self._test_resize(2.5, 3.7)

    def _test_translate(self, x, y, epsilonscale):
        im = self._test_image()

        size_up = int(round(im.width + x)), int(round(im.height + y))
        matrix_up = [
            1, 0, -x,
            0, 1, -y,
            0, 0]
        matrix_down = [
            1, 0, x,
            0, 1, y,
            0, 0]
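        # Note: same output-to-input convention as above -- shifting the
        # image content by (+x, +y) requires (-x, -y) in the matrix, and the
        # reverse shift undoes it, so the round trip should again match the
        # original within the filter-dependent tolerance.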
        for resample, epsilon in [(Image.NEAREST, 0),
                                  (Image.BILINEAR, 1.5), (Image.BICUBIC, 1)]:
            transformed = im.transform(
                size_up, self.transform, matrix_up, resample)
            transformed = transformed.transform(
                im.size, self.transform, matrix_down, resample)
            self.assert_image_similar(transformed, im, epsilon * epsilonscale)

    def test_translate_0_1(self):
        self._test_translate(.1, 0, 3.7)

    def test_translate_0_6(self):
        self._test_translate(.6, 0, 9.1)

    def test_translate_50(self):
        self._test_translate(50, 50, 0)


class TestImageTransformPerspective(TestImageTransformAffine):
    # Repeat all tests for AFFINE transformations with PERSPECTIVE
    transform = Image.PERSPECTIVE
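    # Note: Image.PERSPECTIVE data is 8 coefficients (a, b, c, d, e, f, g, h)
    # mapping each output pixel (x, y) to the input position
    # ((a*x + b*y + c) / (g*x + h*y + 1), (d*x + e*y + f) / (g*x + h*y + 1)).
    # The affine matrices above already carry trailing g = h = 0, so with a
    # denominator of 1 the inherited tests exercise the same geometry here.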


if __name__ == '__main__':
    unittest.main()