-
Notifications
You must be signed in to change notification settings - Fork 45
/
Copy pathdataset.py
42 lines (32 loc) · 1.35 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import numpy as np
import torch
from torch.utils.data import Dataset
import json
from random import randint
from pathlib import Path
class SpeechDataset(Dataset):
    """Random-crop dataset pairing audio with mel-spectrogram windows.

    Expects ``speakers.json`` (list of speaker names) and ``train.json``
    (entries of the form ``[_, _, duration, out_path]``) under ``root``.
    Each item yields ``sample_frames`` mel frames with one frame of context
    on each side, the matching span of audio samples, and the speaker index.
    """

    def __init__(self, root, hop_length, sr, sample_frames):
        self.root = Path(root)
        self.hop_length = hop_length
        self.sample_frames = sample_frames

        with open(self.root / "speakers.json") as file:
            self.speakers = sorted(json.load(file))

        # An utterance must be long enough to hold the crop window plus one
        # frame of mel context on each side, hence sample_frames + 2.
        min_duration = (sample_frames + 2) * hop_length / sr
        with open(self.root / "train.json") as file:
            entries = json.load(file)
        self.metadata = []
        for _, _, duration, out_path in entries:
            if duration > min_duration:
                self.metadata.append(Path(out_path))

    def __len__(self):
        """Number of utterances that survived the duration filter."""
        return len(self.metadata)

    def __getitem__(self, index):
        """Return ``(audio, mel, speaker)`` for one randomly cropped window."""
        # Paths in train.json are stored relative to the parent of root.
        utt_path = self.root.parent / self.metadata[index]
        # Pre-extracted arrays; audio is integer-coded (fed to LongTensor
        # below — presumably mu-law indices, TODO confirm against extractor).
        audio = np.load(utt_path.with_suffix(".wav.npy"))
        mel = np.load(utt_path.with_suffix(".mel.npy"))

        frames = self.sample_frames
        # Random frame offset, keeping one frame of slack at both ends so the
        # padded mel slice below stays in bounds.
        offset = randint(1, mel.shape[-1] - frames - 2)
        mel_window = mel[:, offset - 1:offset + frames + 1]
        # Audio covers exactly the central `frames` frames, plus one sample.
        lo = offset * self.hop_length
        hi = (offset + frames) * self.hop_length + 1
        audio_window = audio[lo:hi]

        # Speaker label is the utterance's parent directory name.
        speaker = self.speakers.index(utt_path.parts[-2])
        return torch.LongTensor(audio_window), torch.FloatTensor(mel_window), speaker