-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsplit_speech.py
More file actions
223 lines (191 loc) · 7.74 KB
/
split_speech.py
File metadata and controls
223 lines (191 loc) · 7.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
"""
Looks for pauses in sound file and stretches the pauses
to give time for repeating previous piece of speech.
The purpose of this script was to prepare sound file
for pronunciation exercises (repeating speech) for
foreign language practice.
"""
import argparse
import sys
from pydub import AudioSegment
from pydub.silence import detect_silence
class SoundFile:
    """
    An mp3 file with silence-detection helpers used to stretch the
    pauses between speech pieces (for language-repetition practice).

    Attributes set by the workflow:
      input_file      -- the loaded AudioSegment
      silences        -- list of [start_ms, end_ms] detected silences
      speech_chunks   -- list of [start_ms, end_ms] speech pieces
      resulting_sound -- the rebuilt AudioSegment with stretched pauses
    """

    def __init__(self, sound_file):
        """Load the mp3 file located at path *sound_file*."""
        self.input_file = AudioSegment.from_mp3(sound_file)

    def detect_silences_manually(self,
                                 minimum_silence_length=100,
                                 silence_threshold=-50):
        """
        Seek silences of at least *minimum_silence_length* milliseconds
        whose level is below *silence_threshold* dBFS.  Leading and
        trailing silences are dropped.  Returns the list of
        [start_ms, end_ms] pairs.
        """
        self.silences = detect_silence(self.input_file,
                                       min_silence_len=minimum_silence_length,
                                       silence_thresh=silence_threshold)
        self.delete_leading_silence()
        self.delete_trailing_silence()
        return self.silences

    def detect_silences_automatically(self):
        """
        Detect silences with an automatically tuned threshold, then
        build speech chunks and extend the pauses.
        """
        sample = self.get_20s_from_the_middle()
        # dBFS is negative, so doubling it starts well below the average
        # loudness; find_threshold_in_sample then refines the value.
        threshold = 2 * int(self.input_file.dBFS)
        threshold = self.find_threshold_in_sample(threshold, sample)
        minimum_silence_length = 100
        self.silences = detect_silence(self.input_file,
                                       min_silence_len=minimum_silence_length,
                                       silence_thresh=threshold)
        self.delete_leading_silence()
        self.delete_trailing_silence()
        minimum_sentence_length = 800
        self.generate_speech_chunks(minimum_sentence_length)
        self.extend_silences(100, 5000)

    def generate_speech_chunks(self, minimum_sentence_length=100):
        """
        Build a two-dimensional list of speech starts and stops within
        the input file:
        [[first_piece_start, first_piece_end], [second_piece_start, ...]]
        Silences that would cut off a piece shorter than
        *minimum_sentence_length* ms are ignored, so the current piece
        keeps growing across them.
        """
        self.speech_chunks = [[0]]
        index = 0
        for silence_start, silence_end in self.silences:
            current_chunk_start = self.speech_chunks[index][0]
            if silence_start - current_chunk_start >= minimum_sentence_length:
                # Close the current piece and open the next one after
                # the silence.
                self.speech_chunks[index].append(silence_start)
                self.speech_chunks.append([silence_end])
                index += 1
        self.speech_chunks[-1].append(len(self.input_file))
        return self.speech_chunks

    def extend_silences(self,
                        percentage_of_speech=100,
                        maximum_sentence_length=10000):
        """
        Insert after each speech piece a silence of
        *percentage_of_speech* percent of the piece's length.
        E.g. for a 1500 ms piece and percentage_of_speech=100 a 1500 ms
        silence is appended.  Pieces of *maximum_sentence_length* ms or
        longer are copied through unchanged (together with the original
        pause that follows them).
        """
        self.resulting_sound = AudioSegment.empty()
        last_index = len(self.speech_chunks) - 1
        for i, (chunk_start, chunk_end) in enumerate(self.speech_chunks):
            chunk_length = chunk_end - chunk_start
            if chunk_length < maximum_sentence_length:
                silence_length = int(chunk_length * percentage_of_speech / 100)
                # Match the source frame rate so pydub does not resample
                # the generated silence on concatenation (silent()
                # defaults to 11025 Hz otherwise).
                self.resulting_sound = (
                    self.resulting_sound
                    + self.input_file[chunk_start:chunk_end]
                    + AudioSegment.silent(
                        silence_length,
                        frame_rate=self.input_file.frame_rate))
            elif i == last_index:
                self.resulting_sound = (
                    self.resulting_sound
                    + self.input_file[chunk_start:chunk_end])
            else:
                # Too-long piece: keep it and the original pause up to
                # the start of the next piece.
                next_chunk_start = self.speech_chunks[i + 1][0]
                self.resulting_sound = (
                    self.resulting_sound
                    + self.input_file[chunk_start:next_chunk_start])

    def write_resulting_file(self, file_name):
        """Export the resulting sound to *file_name* as mp3."""
        with open(file_name, "wb") as f:
            self.resulting_sound.export(f, format="mp3")

    def delete_leading_silence(self):
        """
        Remove a leading silence (one starting at position 0) from the
        silences list.  A file with no detected silences is left alone.
        """
        if self.silences and self.silences[0][0] == 0:
            del self.silences[0]

    def delete_trailing_silence(self):
        """
        Remove a trailing silence (one ending at the end of the file)
        from the silences list.  A file with no detected silences is
        left alone.
        """
        if self.silences and self.silences[-1][1] == len(self.input_file):
            del self.silences[-1]

    def get_20s_from_the_middle(self):
        """
        Return a 20-second sample centred on the middle of the input,
        or the whole input if it is 20 seconds or shorter.
        """
        length = len(self.input_file)
        twenty_seconds = 20_000
        ten_seconds = 10_000
        if length <= twenty_seconds:
            return self.input_file
        middle = length // 2
        return self.input_file[middle - ten_seconds:middle + ten_seconds]

    @staticmethod
    def find_threshold_in_sample(initial_threshold, sample):
        """
        Lower *initial_threshold* in 2 dB steps (by at most 8 dB) until
        the sample yields a plausible number of silences (8-16), and
        return the chosen threshold.
        """
        threshold = initial_threshold
        for offset in range(0, -10, -2):
            sample_silences = detect_silence(sample,
                                             min_silence_len=100,
                                             silence_thresh=threshold + offset)
            if 8 <= len(sample_silences) <= 16:
                threshold += offset
                break
        return threshold
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog='split_speech',
description='extends silences to give time for repetition')
parser.add_argument("input", help="input mp3 file")
parser.add_argument("output", help="resulting mp3 file")
parser.add_argument(
"-s",
type=int,
default=100,
metavar="ms",
help="minimum silence length in milliseconds (default 100)")
parser.add_argument(
"-p",
type=int,
default=100,
metavar="%",
help=
"set silence length as percentage of previous sound duration (default 100)"
)
parser.add_argument(
"-t",
type=int,
default=-50,
metavar="dB",
help=
"threshold for silence (dB) (default -50): below this level sound is counted as silence"
)
parser.add_argument(
"-e",
type=int,
default=200,
metavar="ms",
help=
"minimum speech length: after sentences shorter than this limit do not add silences"
)
args = parser.parse_args()
input_file = args.input
output_file = args.output
sound_file = SoundFile(input_file)
if len(sys.argv) == 3:
sound_file.detect_silences_automatically()
else:
min_sil_length = args.s
min_speech_length = args.e
sil_percentage = args.p
sil_threshold = args.t
sound_file.detect_silences_manually(
minimum_silence_length=min_sil_length,
silence_threshold=sil_threshold)
sound_file.generate_speech_chunks(
minimum_sentence_length=min_speech_length)
sound_file.extend_silences(percentage_of_speech=sil_percentage)
sound_file.write_resulting_file(output_file)