-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy path: soundify.py
132 lines (108 loc) · 3.25 KB
/
soundify.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import argparse
import pyaudio
import numpy as np
from collections import OrderedDict
try:
import Image
except ImportError:
from PIL import Image
# Rendering defaults shared by the synthesis helpers below.
background = (0,0,0)  # pixel value treated as silence
duration = 2          # seconds each pixel column is played for
volume = 0.5          # range [0.0, 1.0]
fs = 44100            # sampling rate, Hz, must be integer

# Note name -> frequency (Hz), two chromatic octaves in ascending order:
# upper-case A..G# is the 220 Hz octave, lower-case a..g# the 440 Hz octave.
od = OrderedDict([
    ('A', 220.000), ('A#', 233.080), ('B', 246.94), ('C', 261.625),
    ('C#', 277.185), ('D', 293.665), ('D#', 311.125), ('E', 329.625),
    ('F', 349.23), ('F#', 369.995), ('G', 391.995), ('G#', 415.305),
    ('a', 440.0), ('a#', 466.16), ('b', 493.88), ('c', 523.25),
    ('c#', 554.37), ('d', 587.33), ('d#', 622.25), ('e', 659.25),
    ('f', 698.46), ('f#', 739.99), ('g', 783.99), ('g#', 830.61),
])
# Indexable (name, frequency) pairs for the chord builders below.
odl = list(od.items())
def make_note(fs, duration, f):
    """Return a float32 sine wave: frequency f Hz, duration seconds, rate fs.

    The buffer holds fs*duration samples; f == 0 yields silence.
    """
    # Keep the exact multiply/divide order so results are bit-identical.
    phase = 2 * np.pi * np.arange(fs * duration) * f / fs
    return np.sin(phase).astype(np.float32)
def make_chord(ratios, fs, duration, f):
    """Mix sine notes at frequencies f * r / ratios[0] (harmonic ratios).

    ratios: list of numbers; the first entry is the reference, so the
            lowest note sounds at exactly f. An empty list yields silence.
    f: base frequency in Hz.
    Returns a float32 buffer of fs*duration samples. NOTE: stacked unit
    sines are not normalized here, so the peak can exceed 1.0.
    """
    chord = make_note(fs, duration, 0)  # silent buffer of the right length
    if not ratios:
        # Original accessed ratios[0] only inside the loop, so an empty
        # list was silence, not an error — preserve that.
        return chord
    base = ratios[0]  # hoisted: loop-invariant reference ratio
    for r in ratios:
        chord += make_note(fs, duration, f * r / base)
    return chord
def make_chrom_chord(notes, fs, duration):
    """Mix sine notes picked from the chromatic table `odl` by index.

    notes: iterable of integer indices into odl (valid range 0..23).
           NOTE(review): __main__ passes 1-based row numbers (j + 1), so
           images taller than 23 rows would raise IndexError — confirm.
    Returns a float32 buffer of fs*duration samples (unnormalized).
    """
    chord = make_note(fs, duration, 0)  # silent buffer of the right length
    for n in notes:
        chord += make_note(fs, duration, odl[n][1])
    return chord
def make_scale_chord(notes, fs, duration):
    """Mix sine notes drawn from a two-octave A-major scale.

    notes: iterable of integer indices into the scale (0 = 'A' 220 Hz,
           13 = 'g#'). NOTE(review): __main__ passes 1-based row numbers
           (j + 1), so rows beyond 12 would raise IndexError — confirm.
    Returns a float32 buffer of fs*duration samples (unnormalized).
    """
    # A-major subset of the chromatic table, rebuilt per call (cheap).
    keys = ['A','B','C#','D','E','F#','G#','a','b','c#','d','e','f#','g#']
    scale = [(k, od[k]) for k in keys if k in od]
    chord = make_note(fs, duration, 0)  # silent buffer of the right length
    for n in notes:
        chord += make_note(fs, duration, scale[n][1])
    return chord
def stop():
    # Tear down the module-level audio objects created in __main__.
    # Order matters: drain/stop the stream, close it, then release
    # the PyAudio (PortAudio) instance itself.
    stream.stop_stream()
    stream.close()
    p.terminate()
if __name__ == "__main__":
    # --- command-line handling ---
    # (Fixed: the old description was copy-pasted from an unrelated
    # wave-function-collapse project.)
    parser = argparse.ArgumentParser(
        description='Plays an image as sound: each non-background pixel '
                    'row in a column becomes one note of a chord.')
    parser.add_argument('-f', dest='name', type=str, required=True,
                        help='path to image to play')
    parser.add_argument('-o', dest='out', type=str, default='major',
                        help='What set of notes to play from. Options are chromatic, major, or harmonic.')
    args = parser.parse_args()

    p = pyaudio.PyAudio()
    im = Image.open(args.name)
    # for paFloat32 sample values must be in range [-1.0, 1.0]
    stream = p.open(format=pyaudio.paFloat32,
                    channels=1,
                    rate=fs,
                    output=True)

    width, height = im.size
    pix = im.load()
    # Binary piano-roll matrix: roll[i][j] == 1 where pixel (i, j) is lit.
    roll = np.zeros((width, height))
    for i in range(width):
        for j in range(height):
            if pix[i, j] != background:
                roll[i][j] = 1
    print("\n")

    # --- sound it out! One chord per image column. ---
    for i in range(width):
        # Lit rows of this column as 1-based note indices (as the
        # original did via j + 1).
        chord = [j + 1 for j in range(height) if roll[i][j]]
        print(chord)
        if args.out == 'harmonic':
            # row indices interpreted as harmonic ratios over 440 Hz
            wave = make_chord(chord, fs, duration, 440)
        elif args.out == 'chromatic':
            wave = make_chrom_chord(chord, fs, duration)
        else:
            # default: notes from the A-major scale
            wave = make_scale_chord(chord, fs, duration)
        # Stacked unit sines can exceed 1.0; rescale so the samples stay
        # inside paFloat32's required [-1.0, 1.0] range (see note above).
        peak = np.max(np.abs(wave))
        if peak > 1.0:
            wave = wave / peak
        # Hand the stream raw little float32 bytes explicitly rather than
        # relying on the ndarray buffer protocol.
        stream.write((volume * wave).astype(np.float32).tobytes())
    stop()