Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Ready] [Recipes] add aishell2 #465

Merged
merged 14 commits into from
Jul 14, 2022
Empty file.
114 changes: 114 additions & 0 deletions egs/aishell2/ASR/local/compute_fbank_aishell2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""
This file computes fbank features of the aishell2 dataset.
It looks for manifests in the directory data/manifests.

The generated fbank features are saved in data/fbank.
"""

import argparse
import logging
import os
from pathlib import Path

import torch
from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached

from icefall.utils import get_executor

# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slow things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)


def compute_fbank_aishell2(num_mel_bins: int = 80):
    """Compute fbank features for the aishell2 dataset.

    Looks for recording/supervision manifests in data/manifests and
    writes the extracted features (LilcomChunky archives) together with
    the cut manifests to data/fbank.

    Args:
      num_mel_bins:
        The number of mel filterbank bins to extract.
    """
    src_dir = Path("data/manifests")
    output_dir = Path("data/fbank")
    # Cap local parallelism at 15 jobs even on larger machines.
    num_jobs = min(15, os.cpu_count())

    dataset_parts = (
        "train",
        "dev",
        "test",
    )
    prefix = "aishell2"
    suffix = "jsonl.gz"
    manifests = read_manifests_if_cached(
        dataset_parts=dataset_parts,
        output_dir=src_dir,
        prefix=prefix,
        suffix=suffix,
    )
    assert manifests is not None

    # Bug fix: read_manifests_if_cached may return an *empty* (or
    # partial) dict when manifest files are missing, and the
    # `is not None` check above lets that pass silently -- the loop
    # below would then simply do nothing.  Fail loudly instead.
    assert len(manifests) == len(dataset_parts), (
        f"Expected {len(dataset_parts)} manifests, "
        f"got {len(manifests)}: {sorted(manifests.keys())}"
    )

    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))

    with get_executor() as ex:  # Initialize the executor only once.
        for partition, m in manifests.items():
            if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
                logging.info(f"{partition} already exists - skipping.")
                continue
            logging.info(f"Processing {partition}")
            cut_set = CutSet.from_manifests(
                recordings=m["recordings"],
                supervisions=m["supervisions"],
            )
            if "train" in partition:
                # 3-way speed perturbation (0.9x / 1.0x / 1.1x) is applied
                # to the training data only.
                cut_set = (
                    cut_set
                    + cut_set.perturb_speed(0.9)
                    + cut_set.perturb_speed(1.1)
                )
            cut_set = cut_set.compute_and_store_features(
                extractor=extractor,
                storage_path=f"{output_dir}/{prefix}_feats_{partition}",
                # when an executor is specified, make more partitions
                num_jobs=num_jobs if ex is None else 80,
                executor=ex,
                storage_type=LilcomChunkyWriter,
            )
            cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")


def get_args():
    """Parse and return the command-line options of this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--num-mel-bins",
        type=int,
        default=80,
        help="""The number of mel bins for Fbank""",
    )
    return arg_parser.parse_args()


if __name__ == "__main__":
    # Configure logging before any work so progress messages are visible.
    log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)
    compute_fbank_aishell2(num_mel_bins=get_args().num_mel_bins)
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/compute_fbank_musan.py
96 changes: 96 additions & 0 deletions egs/aishell2/ASR/local/display_manifest_statistics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This file displays duration statistics of utterances in a manifest.
You can use the displayed value to choose minimum/maximum duration
to remove short and long utterances during the training.

See the function `remove_short_and_long_utt()` in transducer_stateless/train.py
for usage.
"""


from lhotse import load_manifest_lazy


def main():
    """Print duration statistics for each aishell2 cut manifest."""
    manifest_paths = (
        "./data/fbank/aishell2_cuts_train.jsonl.gz",
        "./data/fbank/aishell2_cuts_dev.jsonl.gz",
        "./data/fbank/aishell2_cuts_test.jsonl.gz",
    )
    for manifest_path in manifest_paths:
        print(f"Starting display the statistics for {manifest_path}")
        # `describe()` prints count/duration/percentile statistics.
        load_manifest_lazy(manifest_path).describe()


# Standard script entry point.
if __name__ == "__main__":
    main()

# The triple-quoted string below is never executed; it records the output
# of running this script on a prepared aishell2 setup, so readers can see
# the expected duration statistics without running it themselves.
'''
Starting display the statistics for ./data/fbank/aishell2_cuts_train.jsonl.gz
Cuts count: 3026106
Total duration (hours): 3021.2
Speech duration (hours): 3021.2 (100.0%)
***
Duration statistics (seconds):
mean 3.6
std 1.5
min 0.3
25% 2.4
50% 3.3
75% 4.4
99% 8.2
99.5% 8.9
99.9% 10.6
max 21.5
Starting display the statistics for ./data/fbank/aishell2_cuts_dev.jsonl.gz
Cuts count: 2500
Total duration (hours): 2.0
Speech duration (hours): 2.0 (100.0%)
***
Duration statistics (seconds):
mean 2.9
std 1.0
min 1.1
25% 2.2
50% 2.7
75% 3.4
99% 6.3
99.5% 6.7
99.9% 7.8
max 9.4
Starting display the statistics for ./data/fbank/aishell2_cuts_test.jsonl.gz
Cuts count: 5000
Total duration (hours): 4.0
Speech duration (hours): 4.0 (100.0%)
***
Duration statistics (seconds):
mean 2.9
std 1.0
min 1.1
25% 2.2
50% 2.7
75% 3.3
99% 6.2
99.5% 6.6
99.9% 7.7
max 8.5
'''
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/prepare_char.py
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/prepare_lang.py
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/prepare_words.py
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/text2segments.py
1 change: 1 addition & 0 deletions egs/aishell2/ASR/local/text2token.py
152 changes: 152 additions & 0 deletions egs/aishell2/ASR/prepare.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
#!/usr/bin/env bash

set -eou pipefail

nj=30
stage=0
stop_stage=5

# We assume dl_dir (download dir) contains the following
# directories and files.  If not, you need to apply for AISHELL-2
# through its official website (the corpus is not openly
# downloadable, so this script cannot fetch it for you).
#
# - $dl_dir/aishell2
#
#
# - $dl_dir/musan
#   This directory contains the following directories downloaded from
#   http://www.openslr.org/17/
#
#   - music
#   - noise
#   - speech

dl_dir=$PWD/download

. shared/parse_options.sh || exit 1

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

# Print a timestamped log line prefixed with the calling file name,
# line number and function name.
log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ "$stage" -le 0 ] && [ "$stop_stage" -ge 0 ]; then
  log "stage 0: Download data"

  # If you have pre-downloaded it to /path/to/aishell2,
  # you can create a symlink
  #
  #   ln -sfv /path/to/aishell2 $dl_dir/aishell2
  #
  # The directory structure is
  #   aishell2/
  #   |-- AISHELL-2
  #   |   |-- iOS
  #   |-- data
  #       |-- wav
  #       |-- trans.txt
  #   |-- dev
  #       |-- wav
  #       |-- trans.txt
  #   |-- test
  #       |-- wav
  #       |-- trans.txt


  # If you have pre-downloaded it to /path/to/musan,
  # you can create a symlink
  #
  #   ln -sfv /path/to/musan $dl_dir/musan
  #
  # Fix: quote variable expansions so the script works even when
  # dl_dir contains spaces (shellcheck SC2086).
  if [ ! -d "$dl_dir/musan" ]; then
    lhotse download musan "$dl_dir"
  fi
fi

if [ "$stage" -le 1 ] && [ "$stop_stage" -ge 1 ]; then
  log "Stage 1: Prepare aishell2 manifest"
  # We assume that you have downloaded and unzipped the aishell2 corpus
  # to $dl_dir/aishell2
  #
  # Bug fix: the guard previously tested .aishell_manifests.done but
  # created .aishell2_manifests.done, so the expensive manifest
  # preparation re-ran on every invocation.  Check and touch the SAME
  # marker file.
  if [ ! -f data/manifests/.aishell2_manifests.done ]; then
    mkdir -p data/manifests
    lhotse prepare aishell2 "$dl_dir/aishell2" data/manifests -j "$nj"
    touch data/manifests/.aishell2_manifests.done
  fi
fi

if [ "$stage" -le 2 ] && [ "$stop_stage" -ge 2 ]; then
  log "Stage 2: Prepare musan manifest"
  # We assume that you have downloaded the musan corpus
  # to $dl_dir/musan (the old comment incorrectly said data/musan).
  if [ ! -f data/manifests/.musan_manifests.done ]; then
    log "It may take 6 minutes"
    mkdir -p data/manifests
    lhotse prepare musan "$dl_dir/musan" data/manifests
    touch data/manifests/.musan_manifests.done
  fi
fi

if [ "$stage" -le 3 ] && [ "$stop_stage" -ge 3 ]; then
  # Fix: the log message said "aishell"; this stage computes fbank
  # features for aishell2.
  log "Stage 3: Compute fbank for aishell2"
  if [ ! -f data/fbank/.aishell2.done ]; then
    mkdir -p data/fbank
    ./local/compute_fbank_aishell2.py
    touch data/fbank/.aishell2.done
  fi
fi

if [ "$stage" -le 4 ] && [ "$stop_stage" -ge 4 ]; then
  log "Stage 4: Compute fbank for musan"
  # Fix: the marker file was misspelled ".msuan.done".  Note: if you
  # already ran the old script, this stage will recompute once because
  # the old marker name is no longer checked.
  if [ ! -f data/fbank/.musan.done ]; then
    mkdir -p data/fbank
    ./local/compute_fbank_musan.py
    touch data/fbank/.musan.done
  fi
fi

if [ "$stage" -le 5 ] && [ "$stop_stage" -ge 5 ]; then
  log "Stage 5: Prepare char based lang"
  lang_char_dir=data/lang_char
  mkdir -p "$lang_char_dir"

  # Prepare text.
  # Note: in Linux, you can install jq with the following command:
  # 1. wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
  # 2. chmod +x ./jq
  # 3. cp jq /usr/bin
  if [ ! -f "$lang_char_dir/text" ]; then
    # Extract the "text" field from the supervision manifest and strip
    # the surrounding JSON quotes, then tokenize to characters.
    gunzip -c data/manifests/aishell2_supervisions_train.jsonl.gz \
      | jq '.text' | sed 's/"//g' \
      | ./local/text2token.py -t "char" > "$lang_char_dir/text"
  fi

  # The implementation of chinese word segmentation for text,
  # and it will take about 15 minutes.
  # If can't install paddle-tiny with python 3.8, please refer
  # https://github.com/fxsjy/jieba/issues/920
  if [ ! -f "$lang_char_dir/text_words_segmentation" ]; then
    python3 ./local/text2segments.py \
      --input-file "$lang_char_dir/text" \
      --output-file "$lang_char_dir/text_words_segmentation"
  fi

  # One word per line, sorted and de-duplicated, empty lines dropped.
  # Fix: dropped the redundant trailing `uniq` (sort -u already removes
  # duplicates) and the useless `cat` pipe.
  sed 's/ /\n/g' "$lang_char_dir/text_words_segmentation" \
    | sort -u | sed '/^$/d' > "$lang_char_dir/words_no_ids.txt"

  if [ ! -f "$lang_char_dir/words.txt" ]; then
    python3 ./local/prepare_words.py \
      --input-file "$lang_char_dir/words_no_ids.txt" \
      --output-file "$lang_char_dir/words.txt"
  fi

  if [ ! -f "$lang_char_dir/L_disambig.pt" ]; then
    python3 ./local/prepare_char.py
  fi
fi
Empty file.
Loading