You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
~/anaconda3/lib/python3.8/site-packages/fastai/data_block.py in process(self)
536 "Process the inner datasets."
537 xp,yp = self.get_processors()
--> 538 for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
539 #progress_bar clear the outputs so in some case warnings issued during processing disappear.
540 for ds in self.lists:
~/anaconda3/lib/python3.8/site-packages/librosa/effects.py in _signal_to_frame_nonsilent(y, frame_length, hop_length, top_db, ref)
443 """
444 # Convert to mono
--> 445 y_mono = core.to_mono(y)
446
447 # Compute the MSE for the signal
~/anaconda3/lib/python3.8/site-packages/librosa/core/audio.py in to_mono(y)
449
450 # Validate the buffer. Stereo is ok here.
--> 451 util.valid_audio(y, mono=False)
452
453 if y.ndim > 1:
~/anaconda3/lib/python3.8/site-packages/librosa/util/utils.py in valid_audio(y, mono)
303
304 elif y.ndim == 2 and y.shape[0] < 2:
--> 305 raise ParameterError(
306 "Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
307 )
ParameterError: Mono data must have shape (samples,). Received shape=(1, 42240)
The text was updated successfully, but these errors were encountered:
While working with the following commands:
config_trim.silence_padding = 100
audio_trim=AudioList.from_folder(data_folder, config=config_trim).split_by_rand_pct(.2, seed=4).label_from_re(label_pattern)
I get the following error:
ParameterError Traceback (most recent call last)
in
1 config_trim.silence_padding = 100
----> 2 audio_trim=AudioList.from_folder(data_folder, config=config_trim).split_by_rand_pct(.2, seed=4).label_from_re(label_pattern)
~/anaconda3/lib/python3.8/site-packages/fastai/data_block.py in _inner(*args, **kwargs)
482 self.valid = fv(*args, from_item_lists=True, **kwargs)
483 self.__class__ = LabelLists
--> 484 self.process()
485 return self
486 return _inner
~/anaconda3/lib/python3.8/site-packages/fastai/data_block.py in process(self)
536 "Process the inner datasets."
537 xp,yp = self.get_processors()
--> 538 for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
539 #progress_bar clear the outputs so in some case warnings issued during processing disappear.
540 for ds in self.lists:
~/Desktop/project_radar/fastai_audio/audio/data.py in process(self, *args, **kwargs)
280
281 def process(self, *args, **kwargs):
--> 282 self._pre_process()
283 super().process(*args, **kwargs)
284 self.x.config._processed = True
~/Desktop/project_radar/fastai_audio/audio/data.py in _pre_process(self)
265 if x.config.remove_silence:
266 print("Preprocessing: Removing Silence")
--> 267 items = [remove_silence(i, x.config, x.path) for i in progress_bar(items)]
268 items = reduce(concat, items, np.empty((0, 2)))
269
~/Desktop/project_radar/fastai_audio/audio/data.py in &lt;listcomp&gt;(.0)
265 if x.config.remove_silence:
266 print("Preprocessing: Removing Silence")
--> 267 items = [remove_silence(i, x.config, x.path) for i in progress_bar(items)]
268 items = reduce(concat, items, np.empty((0, 2)))
269
~/Desktop/project_radar/fastai_audio/audio/data.py in remove_silence(item, config, path)
182 if not files:
183 sig, sr = torchaudio.load(item_path)
--> 184 sigs = tfm_remove_silence(sig, sr, remove_type, st, sp)
185 files = make_cache(sigs, sr, config, cache_prefix, item_path, [st, sp])
186 _record_cache_contents(config, files)
~/Desktop/project_radar/fastai_audio/audio/transform.py in tfm_remove_silence(signal, rate, remove_type, threshold, pad_ms)
176 padding = int(pad_ms/1000*rate)
177 if(padding > actual.shape[-1]): return [actual]
--> 178 splits = split(actual.numpy(), top_db=threshold, hop_length=padding)
179 if remove_type == "split":
180 return [actual[:,(max(a-padding,0)):(min(b+padding,actual.shape[-1]))] for (a, b) in _merge_splits(splits, padding)]
~/anaconda3/lib/python3.8/site-packages/librosa/effects.py in split(y, top_db, ref, frame_length, hop_length)
547 """
548
--> 549 non_silent = _signal_to_frame_nonsilent(
550 y, frame_length=frame_length, hop_length=hop_length, ref=ref, top_db=top_db
551 )
~/anaconda3/lib/python3.8/site-packages/librosa/effects.py in _signal_to_frame_nonsilent(y, frame_length, hop_length, top_db, ref)
443 """
444 # Convert to mono
--> 445 y_mono = core.to_mono(y)
446
447 # Compute the MSE for the signal
~/anaconda3/lib/python3.8/site-packages/librosa/core/audio.py in to_mono(y)
449
450 # Validate the buffer. Stereo is ok here.
--> 451 util.valid_audio(y, mono=False)
452
453 if y.ndim > 1:
~/anaconda3/lib/python3.8/site-packages/librosa/util/utils.py in valid_audio(y, mono)
303
304 elif y.ndim == 2 and y.shape[0] < 2:
--> 305 raise ParameterError(
306 "Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
307 )
ParameterError: Mono data must have shape (samples,). Received shape=(1, 42240)
The text was updated successfully, but these errors were encountered: