Tf_lite #56

Open
abhinavchandel0 opened this issue May 3, 2022 · 9 comments

abhinavchandel0 commented May 3, 2022

INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
Traceback (most recent call last):
  File "real_time_processing_tf_lite.py", line 69, in <module>
    interpreter_1.set_tensor(input_details_1[0]['index'], in_mag)
  File "/usr/local/lib/python3.7/dist-packages/tflite_runtime/interpreter.py", line 698, in set_tensor
    self._interpreter.SetTensor(tensor_index, value)
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0.

I get this error when running real_time_processing_tf_lite.py.
Any help would be greatly appreciated. Thanks in advance!
@breizhn

@StuartIanNaylor

#52 (comment)


abhinavchandel0 commented May 3, 2022

@StuartIanNaylor Can you please explain what changes I have to make in my code or in the process to fix this error?

@StuartIanNaylor

I never got round to trying it, but somehow the tensor sizes get mixed up, so probably do a print(shape) and set the input tensors accordingly.
I ended up busy, so I haven't tried yet.
@breizhn may help if time permits.
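
Something like this quick dump should show what each model actually expects (a rough sketch; it assumes the input_details_* / output_details_* variables already created in real_time_processing_tf_lite.py):

    # print the index and shape of every input/output of both models
    for label, details in [('model 1 in', input_details_1),
                           ('model 1 out', output_details_1),
                           ('model 2 in', input_details_2),
                           ('model 2 out', output_details_2)]:
        for d in details:
            print(label, 'index:', d['index'], 'shape:', d['shape'])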

@abhinavchandel0
Author

@StuartIanNaylor Okay, thanks!


abhinavchandel0 commented May 7, 2022

Code:
'''

    import soundfile as sf
    import numpy as np
    import tflite_runtime.interpreter as tflite
    import time
    
    
    
    ##########################
    # the values are fixed; if you need other values, you have to retrain.
    # The sampling rate of 16 kHz is also fixed.
    block_len = 512
    block_shift = 128
    # load models
    interpreter_1 = tflite.Interpreter(model_path='./models_DTLN_model/models_DTLN_model_1.tflite')
    interpreter_1.allocate_tensors()
    interpreter_2 = tflite.Interpreter(model_path='./models_DTLN_model/models_DTLN_model_2.tflite')
    interpreter_2.allocate_tensors()
    
    # Get input and output tensors.
    input_details_1 = interpreter_1.get_input_details()
    output_details_1 = interpreter_1.get_output_details()
    
    input_details_2 = interpreter_2.get_input_details()
    output_details_2 = interpreter_2.get_output_details()
    # create states for the lstms
    states_1 = np.zeros(input_details_1[1]['shape']).astype('float32')
    states_2 = np.zeros(input_details_2[1]['shape']).astype('float32')
    # load audio file at 16k fs (please change)
    audio,fs = sf.read('./models_DTLN_model/input/fileid_3.wav')
    # check for sampling rate
    if fs != 16000:
        raise ValueError('This model only supports 16k sampling rate.')
    # preallocate output audio
    out_file = np.zeros((len(audio)))
    # create buffer
    in_buffer = np.zeros((block_len)).astype('float32')
    out_buffer = np.zeros((block_len)).astype('float32')
    # calculate number of blocks
    num_blocks = (audio.shape[0] - (block_len-block_shift)) // block_shift
    time_array = []      
    # iterate over the number of blocks
    for idx in range(num_blocks):
        start_time = time.time()
        # shift values and write to buffer
        in_buffer[:-block_shift] = in_buffer[block_shift:]
        in_buffer[-block_shift:] = audio[idx*block_shift:(idx*block_shift)+block_shift]
        # calculate fft of input block
        in_block_fft = np.fft.rfft(in_buffer)
        in_mag = np.abs(in_block_fft)
        in_phase = np.angle(in_block_fft)
        # reshape magnitude to input dimensions
        in_mag = np.reshape(in_mag, (1,1,-1)).astype('float32')
        # set tensors to the first model
        interpreter_1.set_tensor(input_details_1[1]['index'], states_1)
        interpreter_1.set_tensor(input_details_1[0]['index'], in_mag)
        # run calculation
        interpreter_1.invoke()
        # get the output of the first block
        out_mask = interpreter_1.get_tensor(output_details_1[0]['index']) 
        states_1 = interpreter_1.get_tensor(output_details_1[1]['index'])
        # calculate the ifft
        estimated_complex = in_mag * out_mask * np.exp(1j * in_phase)
        estimated_block = np.fft.irfft(estimated_complex)
        # reshape the time domain block
        estimated_block = np.reshape(estimated_block, (1,1,-1)).astype('float32')
        # set tensors to the second block
        interpreter_2.set_tensor(input_details_2[1]['index'], states_2)
        interpreter_2.set_tensor(input_details_2[0]['index'], estimated_block)
        # run calculation
        interpreter_2.invoke()
        # get output tensors
        out_block = interpreter_2.get_tensor(output_details_2[0]['index'])
        states_2 = interpreter_2.get_tensor(output_details_2[1]['index'])

        # shift values and write to buffer
        out_buffer[:-block_shift] = out_buffer[block_shift:]
        out_buffer[-block_shift:] = np.zeros((block_shift))
        out_buffer += np.squeeze(out_block)
        # write block to output file
        out_file[idx*block_shift:(idx*block_shift)+block_shift] = out_buffer[:block_shift]
        time_array.append(time.time()-start_time)

    # write to .wav file
    sf.write('out.wav', out_file, fs)
    print('Processing Time [ms]:')
    print(np.mean(np.stack(time_array))*1000)
    print('Processing finished.')

'''

Error: I get the following dimension mismatch error.

'''

Traceback (most recent call last):
  File "real_time_processing_tf_lite.py", line 69, in <module>
    interpreter_1.set_tensor(input_details_1[0]['index'], in_mag)
  File "/usr/local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 698, in set_tensor
    self._interpreter.SetTensor(tensor_index, value)
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0.

'''
This is what the shape is supposed to be:

print(interpreter_1.get_input_details())

>>[{'name': 'serving_default_input_3:0', 'index': 0, 'shape': array([ 1, 2, 128, 2], dtype=int32), 'shape_signature': array([ 1, 2, 128, 2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'serving_default_input_2:0', 'index': 1, 'shape': array([ 1, 1, 257], dtype=int32), 'shape_signature': array([ 1, 1, 257], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}]

And when I add another dimension to in_mag, I get this error:

in_mag = np.reshape(in_mag, (-1,1,1,-1)).astype('float32')


>>ValueError: can only specify one unknown dimension 

And for other values, I get this error:

>>cannot reshape array of size 257 into shape (1,128,newaxis)

@StuartIanNaylor

Yeah, it's really confusing for the likes of me, as the in_mag shape is the output shape, not the input.

Maybe @breizhn might have time to give us some pointers, as I did the same and just swapped the model loads.
#52 (comment)
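
For what it's worth, a quick element count shows why no reshape of in_mag can ever match that 4-D input slot (and np.reshape only allows a single -1 anyway):

    import numpy as np
    in_mag = np.abs(np.fft.rfft(np.zeros(512)))  # one 512-sample block -> 257 bins
    print(in_mag.size)              # 257
    print(np.prod([1, 2, 128, 2]))  # 512, so 257 values can never fill it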


breizhn commented May 16, 2022

So from

print(interpreter_1.get_input_details())

>>[{'name': 'serving_default_input_3:0', 'index': 0, 'shape': array([ 1, 2, 128, 2], dtype=int32), 'shape_signature': array([ 1, 2, 128, 2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'serving_default_input_2:0', 'index': 1, 'shape': array([ 1, 1, 257], dtype=int32), 'shape_signature': array([ 1, 1, 257], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}]

we see that the TF-lite model has two inputs:
'index': 0, 'shape': array([ 1, 2, 128, 2])
'index': 1, 'shape': array([ 1, 1, 257])

From the input details you can see that 'shape': array([ 1, 2, 128, 2]) (the LSTM states) has idx=0 and 'shape': array([ 1, 1, 257]) (the input magnitude) has idx=1.

so switch the index:

interpreter_1.set_tensor(input_details_1[0]['index'], states_1)
interpreter_1.set_tensor(input_details_1[1]['index'], in_mag)

If you get an error after the first iteration, also switch:

out_mask = interpreter_1.get_tensor(output_details_1[1]['index']) 
states_1 = interpreter_1.get_tensor(output_details_1[0]['index'])

Always check input and output details, so that the index for each input and output is correct.
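
One way to avoid hard-coding the positions altogether is to pick each tensor by its rank, since here the LSTM states are the only 4-D tensors and the magnitude/time block is the only 3-D one (a rough sketch, not tested):

    def split_details(details):
        # 3-D tensor = signal block, 4-D tensor = LSTM states
        signal = next(d['index'] for d in details if len(d['shape']) == 3)
        states = next(d['index'] for d in details if len(d['shape']) == 4)
        return signal, states

    mag_idx, states_in_idx = split_details(input_details_1)
    interpreter_1.set_tensor(states_in_idx, states_1)
    interpreter_1.set_tensor(mag_idx, in_mag)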


StuartIanNaylor commented May 16, 2022

OK, I am still having an off day, but I got to line 100 and have thrown in loads of print statements so you can see.
I kept the load order, but states_1 = np.zeros(input_details_1[0]['shape']).astype('float32') got swapped around.

##########################
# the values are fixed; if you need other values, you have to retrain.
# The sampling rate of 16 kHz is also fixed.
block_len = 512
block_shift = 128
# load models
interpreter_1 = tflite.Interpreter(model_path='DTLN_model_1.tflite')
interpreter_1.allocate_tensors()
interpreter_2 = tflite.Interpreter(model_path='DTLN_model_2.tflite')
interpreter_2.allocate_tensors()

# Get input and output tensors.
input_details_1 = interpreter_1.get_input_details()
output_details_1 = interpreter_1.get_output_details()

input_details_2 = interpreter_2.get_input_details()
output_details_2 = interpreter_2.get_output_details()
print(input_details_1, "\n")
print(output_details_1, "\n")
print(input_details_2, "\n")
print(output_details_2, "\n")
# create states for the lstms
states_1 = np.zeros(input_details_1[0]['shape']).astype('float32')
states_2 = np.zeros(input_details_2[1]['shape']).astype('float32')
# load audio file at 16k fs (please change)
audio,fs = sf.read('fileid_10.wav')
# check for sampling rate
if fs != 16000:
    raise ValueError('This model only supports 16k sampling rate.')
# preallocate output audio
out_file = np.zeros((len(audio)))
# create buffer
in_buffer = np.zeros((block_len)).astype('float32')
out_buffer = np.zeros((block_len)).astype('float32')
# calculate number of blocks
num_blocks = (audio.shape[0] - (block_len-block_shift)) // block_shift
time_array = []      
# iterate over the number of blocks
for idx in range(num_blocks):
    start_time = time.time()
    # shift values and write to buffer
    in_buffer[:-block_shift] = in_buffer[block_shift:]
    in_buffer[-block_shift:] = audio[idx*block_shift:(idx*block_shift)+block_shift]
    # calculate fft of input block
    in_block_fft = np.fft.rfft(in_buffer)
    in_mag = np.abs(in_block_fft)
    in_phase = np.angle(in_block_fft)
    # reshape magnitude to input dimensions
    in_mag = np.reshape(in_mag, (1,1,-1)).astype('float32')
    # set tensors to the first model
    print(np.shape(states_1), np.shape(in_mag))
    interpreter_1.set_tensor(input_details_1[0]['index'], states_1)
    interpreter_1.set_tensor(input_details_1[1]['index'], in_mag)
    # run calculation 
    interpreter_1.invoke()
    # get the output of the first block
    out_mask = interpreter_1.get_tensor(output_details_1[0]['index']) 
    states_1 = interpreter_1.get_tensor(output_details_1[1]['index'])
    print(np.shape(out_mask), np.shape(states_1))
    # calculate the ifft
    estimated_complex = in_mag * out_mask * np.exp(1j * in_phase)
    estimated_block = np.fft.irfft(estimated_complex)
    # reshape the time domain block
    estimated_block = np.reshape(estimated_block, (1,1,-1)).astype('float32')
    # set tensors to the second block
    print(np.shape(states_2), np.shape(estimated_block))
    interpreter_2.set_tensor(input_details_2[1]['index'], states_2)
    interpreter_2.set_tensor(input_details_2[0]['index'], estimated_block)
    # run calculation
    interpreter_2.invoke()
    # get output tensors
    out_block = interpreter_2.get_tensor(output_details_2[0]['index']) 
    states_2 = interpreter_2.get_tensor(output_details_2[1]['index'])
    print(np.shape(out_block), np.shape(states_2))
    
    
    
    # shift values and write to buffer
    out_buffer[:-block_shift] = out_buffer[block_shift:]
    out_buffer[-block_shift:] = np.zeros((block_shift))
    out_buffer  += np.squeeze(out_block)
    # write block to output file
    out_file[idx*block_shift:(idx*block_shift)+block_shift] = out_buffer[:block_shift]
    time_array.append(time.time()-start_time)

I get to here, and maybe those shape printouts will help:

python real_time_processing_tf_lite.py
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
[{'name': 'serving_default_input_3:0', 'index': 0, 'shape': array([  1,   2, 128,   2], dtype=int32), 'shape_signature': array([  1,   2, 128,   2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'serving_default_input_2:0', 'index': 1, 'shape': array([  1,   1, 257], dtype=int32), 'shape_signature': array([  1,   1, 257], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}] 

[{'name': 'StatefulPartitionedCall:0', 'index': 64, 'shape': array([  1,   1, 257], dtype=int32), 'shape_signature': array([  1,   1, 257], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'StatefulPartitionedCall:1', 'index': 69, 'shape': array([  1,   2, 128,   2], dtype=int32), 'shape_signature': array([  1,   2, 128,   2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}] 

[{'name': 'serving_default_input_4:0', 'index': 0, 'shape': array([  1,   1, 512], dtype=int32), 'shape_signature': array([  1,   1, 512], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'serving_default_input_5:0', 'index': 1, 'shape': array([  1,   2, 128,   2], dtype=int32), 'shape_signature': array([  1,   2, 128,   2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}] 

[{'name': 'StatefulPartitionedCall:1', 'index': 97, 'shape': array([  1,   2, 128,   2], dtype=int32), 'shape_signature': array([  1,   2, 128,   2], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}, {'name': 'StatefulPartitionedCall:0', 'index': 92, 'shape': array([  1,   1, 512], dtype=int32), 'shape_signature': array([  1,   1, 512], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}] 

(1, 2, 128, 2) (1, 1, 257)
(1, 1, 257) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 512)
(1, 2, 128, 2) (1, 1, 512)
Traceback (most recent call last):
  File "/home/stuart/DTLN/real_time_processing_tf_lite.py", line 100, in <module>
    out_buffer  += np.squeeze(out_block)
ValueError: operands could not be broadcast together with shapes (512,) (2,128,2) (512,) 

Then, swapping the indexes on:

    # get output tensors
    out_block = interpreter_2.get_tensor(output_details_2[1]['index']) 
    states_2 = interpreter_2.get_tensor(output_details_2[0]['index'])

Yeah :) thanks

...1, 2, 128, 2) (1, 1, 512)
(1, 1, 512) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 257)
(1, 1, 257) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 512)
(1, 1, 512) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 257)
(1, 1, 257) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 512)
(1, 1, 512) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 257)
(1, 1, 257) (1, 2, 128, 2)
(1, 2, 128, 2) (1, 1, 512)
(1, 1, 512) (1, 2, 128, 2)
Processing Time [ms]:
1.2205140826732024
Processing finished.

@StuartIanNaylor

So if that was as confusing for you as it was for this slightly befuddled brain at the moment, just try this:

"""
This is an example of how to implement real-time processing of the DTLN TF-lite
model in Python.
Please change the name of the .wav file at line 43 before running the script.
For .whl files of the TF-lite runtime go to:
    https://www.tensorflow.org/lite/guide/python
    
Author: Nils L. Westhausen ([email protected])
Version: 30.06.2020
This code is licensed under the terms of the MIT-license.
"""

import soundfile as sf
import numpy as np
import tensorflow.lite as tflite
import time



##########################
# the values are fixed; if you need other values, you have to retrain.
# The sampling rate of 16 kHz is also fixed.
block_len = 512
block_shift = 128
# load models
interpreter_1 = tflite.Interpreter(model_path='DTLN_model_1.tflite')
interpreter_1.allocate_tensors()
interpreter_2 = tflite.Interpreter(model_path='DTLN_model_2.tflite')
interpreter_2.allocate_tensors()

# Get input and output tensors.
input_details_1 = interpreter_1.get_input_details()
output_details_1 = interpreter_1.get_output_details()

input_details_2 = interpreter_2.get_input_details()
output_details_2 = interpreter_2.get_output_details()
print(input_details_1, "\n")
print(output_details_1, "\n")
print(input_details_2, "\n")
print(output_details_2, "\n")
# create states for the lstms
states_1 = np.zeros(input_details_1[0]['shape']).astype('float32')
states_2 = np.zeros(input_details_2[1]['shape']).astype('float32')
# load audio file at 16k fs (please change)
audio,fs = sf.read('fileid_10.wav')
# check for sampling rate
if fs != 16000:
    raise ValueError('This model only supports 16k sampling rate.')
# preallocate output audio
out_file = np.zeros((len(audio)))
# create buffer
in_buffer = np.zeros((block_len)).astype('float32')
out_buffer = np.zeros((block_len)).astype('float32')
# calculate number of blocks
num_blocks = (audio.shape[0] - (block_len-block_shift)) // block_shift
time_array = []      
# iterate over the number of blocks
for idx in range(num_blocks):
    start_time = time.time()
    # shift values and write to buffer
    in_buffer[:-block_shift] = in_buffer[block_shift:]
    in_buffer[-block_shift:] = audio[idx*block_shift:(idx*block_shift)+block_shift]
    # calculate fft of input block
    in_block_fft = np.fft.rfft(in_buffer)
    in_mag = np.abs(in_block_fft)
    in_phase = np.angle(in_block_fft)
    # reshape magnitude to input dimensions
    in_mag = np.reshape(in_mag, (1,1,-1)).astype('float32')
    # set tensors to the first model
    print(np.shape(states_1), np.shape(in_mag))
    interpreter_1.set_tensor(input_details_1[0]['index'], states_1)
    interpreter_1.set_tensor(input_details_1[1]['index'], in_mag)
    # run calculation 
    interpreter_1.invoke()
    # get the output of the first block
    out_mask = interpreter_1.get_tensor(output_details_1[0]['index']) 
    states_1 = interpreter_1.get_tensor(output_details_1[1]['index'])
    print(np.shape(out_mask), np.shape(states_1))
    # calculate the ifft
    estimated_complex = in_mag * out_mask * np.exp(1j * in_phase)
    estimated_block = np.fft.irfft(estimated_complex)
    # reshape the time domain block
    estimated_block = np.reshape(estimated_block, (1,1,-1)).astype('float32')
    # set tensors to the second block
    print(np.shape(states_2), np.shape(estimated_block))
    interpreter_2.set_tensor(input_details_2[1]['index'], states_2)
    interpreter_2.set_tensor(input_details_2[0]['index'], estimated_block)
    # run calculation
    interpreter_2.invoke()
    # get output tensors
    out_block = interpreter_2.get_tensor(output_details_2[1]['index']) 
    states_2 = interpreter_2.get_tensor(output_details_2[0]['index'])
    print(np.shape(out_block), np.shape(states_2))
    
    
    
    # shift values and write to buffer
    out_buffer[:-block_shift] = out_buffer[block_shift:]
    out_buffer[-block_shift:] = np.zeros((block_shift))
    out_buffer  += np.squeeze(out_block)
    # write block to output file
    out_file[idx*block_shift:(idx*block_shift)+block_shift] = out_buffer[:block_shift]
    time_array.append(time.time()-start_time)
    
# write to .wav file 
sf.write('out.wav', out_file, fs) 
print('Processing Time [ms]:')
print(np.mean(np.stack(time_array))*1000)
print('Processing finished.')
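
If another conversion ever flips the order again, the outputs can also be fetched by rank instead of by fixed position, since the states are the only 4-D tensors (a sketch under that assumption, not tested):

    # fetch output tensors by rank instead of fixed position
    out_block = next(interpreter_2.get_tensor(d['index'])
                     for d in output_details_2 if len(d['shape']) == 3)
    states_2 = next(interpreter_2.get_tensor(d['index'])
                    for d in output_details_2 if len(d['shape']) == 4)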
