精易论坛

Title: Just started learning Python, could some expert help me change this code to run with GPU acceleration,.....

Author: 音鬼    Time: 2024-8-1 08:16
Subject: Just started learning Python, could some expert help me change this code to run with GPU acceleration,.....

The source is attached directly. If any expert knows how, please help me modify it to run on the GPU; right now it runs on the CPU and is too slow. My graphics card is an NVIDIA GeForce RTX 3060.

123.rar

1.66 KB, downloads: 5

Source code


Author: suyunsq    Time: 2024-8-1 08:25
Notice: the author has been banned or deleted; content automatically hidden
Author: oncall12    Time: 2024-8-1 08:46
[Python] code:
import cupy as cp  # CuPy in place of NumPy so the arrays live on the GPU
import cupyx.scipy.signal as signal  # GPU lfilter; assumes a recent CuPy release that provides it
import numpy as np  # still needed on the host side when writing the WAV file
import wave
import struct

# Filter building blocks, operating on CuPy arrays
def allpass(input_signal, delay, gain):
    # Schroeder all-pass: y[n] = gain*x[n] + x[n-(delay-1)] - gain*y[n-(delay-1)]
    B = cp.zeros(delay)
    B[0] = gain
    B[delay-1] = 1
    A = cp.zeros(delay)
    A[0] = 1
    A[delay-1] = gain
    return signal.lfilter(B, A, input_signal)  # runs on the GPU via cupyx.scipy.signal

def comb(input_signal, delay, gain):
    # Feedback comb: y[n] = x[n-(delay-1)] + gain*y[n-(delay-1)]
    B = cp.zeros(delay)
    B[delay-1] = 1
    A = cp.zeros(delay)
    A[0] = 1
    A[delay-1] = -gain
    return signal.lfilter(B, A, input_signal)

def comb_with_lp(input_signal, delay, g, g1):
    # Feedback comb with a one-pole low-pass filter in the feedback path
    g2 = g * (1 - g1)
    B = cp.zeros(delay + 1)
    B[delay-1] = 1
    B[delay] = -g1
    A = cp.zeros(delay)
    A[0] = 1
    A[1] = -g1
    A[delay-1] = -g2
    return signal.lfilter(B, A, input_signal)

def delay(input_signal, delay, gain=1):
    # Prepend `delay` zeros, then scale by `gain`
    return cp.concatenate((cp.zeros(delay), input_signal)) * gain

def main():
    # Open / generate the test WAV signal
    # Kronecker delta:
    # sample = cp.zeros((2, 88200))
    # sample[:, 0] = 1
    # WAV file
    sample_in = 'D:\\321\\555.wav'
    frame_rate = 44100.0
    wav_file = wave.open(sample_in, 'r')
    num_samples_sample = wav_file.getnframes()
    num_channels_sample = wav_file.getnchannels()
    sample = wav_file.readframes(num_samples_sample)
    total_samples_sample = num_samples_sample * num_channels_sample
    wav_file.close()

    # Unpack the 16-bit frames, move them to the GPU and normalize each channel
    sample = struct.unpack('{n}h'.format(n=total_samples_sample), sample)
    sample = cp.array([sample[0::2], sample[1::2]], dtype=cp.float64)
    sample[0] /= cp.max(cp.abs(sample[0]), axis=0)
    sample[1] /= cp.max(cp.abs(sample[1]), axis=0)

    # Initialize the algorithm's variables
    stereospread = 23  # stereo spread, in samples
    delays_r = [605, 969, 1356, 1562, 1785, 2100, 2600, 3100, 3581, 4025]  # comb delays, R channel
    delays_l = [d + stereospread for d in delays_r]  # comb delays, L channel
    delays_early = [477, 821, 1161, 1511, 1721, 1951, 2310, 2542, 2751, 2952]  # early-reflection delays
    gains_early = [0.87, 0.818, 0.635, 0.719, 0.267, 0.242, 0.192, 0.172, 0.151, 0.111]  # early-reflection gains
    g1_list = [0.21, 0.23, 0.25, 0.27, 0.28, 0.25, 0.21, 0.19, 0.17, 0.14]  # low-pass coefficients
    g = 0.9  # comb feedback gain
    rev_to_er_delay = 2000
    allpass_delay = 186
    allpass_g = 0.7
    output_gain = 0.075
    dry = 1
    wet = 1
    width = 1
    wet1 = wet * (width / 2 + 0.5)
    wet2 = wet * ((1 - width) / 2)
    early_reflections_r = cp.zeros(sample[0].size)
    early_reflections_l = cp.zeros(sample[1].size)
    combs_out_r = cp.zeros(sample[0].size)
    combs_out_l = cp.zeros(sample[1].size)

    # Main part of the algorithm
    for i in range(10):
        early_reflections_r += delay(sample[0], delays_early[i], gains_early[i])[:sample[0].size]
        early_reflections_l += delay(sample[1], delays_early[i], gains_early[i])[:sample[1].size]

    for i in range(6):
        combs_out_r += comb_with_lp(sample[0], delays_r[i], g, g1_list[i])
        combs_out_l += comb_with_lp(sample[1], delays_l[i], g, g1_list[i])

    reverb_r = allpass(combs_out_r, allpass_delay, allpass_g)
    reverb_l = allpass(combs_out_l, allpass_delay, allpass_g)

    early_reflections_r = cp.concatenate((early_reflections_r, cp.zeros(rev_to_er_delay)))
    early_reflections_l = cp.concatenate((early_reflections_l, cp.zeros(rev_to_er_delay)))

    reverb_r = delay(reverb_r, rev_to_er_delay)
    reverb_l = delay(reverb_l, rev_to_er_delay)

    reverb_out_r = early_reflections_r + reverb_r
    reverb_out_l = early_reflections_l + reverb_l

    reverb_out_r = output_gain * ((reverb_out_r * wet1 + reverb_out_l * wet2) + cp.concatenate((sample[0], cp.zeros(rev_to_er_delay))) * dry)
    reverb_out_l = output_gain * ((reverb_out_l * wet1 + reverb_out_r * wet2) + cp.concatenate((sample[1], cp.zeros(rev_to_er_delay))) * dry)

    # Write to file (copy the results back to host memory first)
    signal_integer_r = (cp.asnumpy(reverb_out_r) * np.iinfo(np.int16).max).astype(np.int16)
    signal_integer_l = (cp.asnumpy(reverb_out_l) * np.iinfo(np.int16).max).astype(np.int16)
    signal_to_render = np.empty(signal_integer_r.size + signal_integer_l.size, dtype=np.int16)
    signal_to_render[0::2] = signal_integer_r
    signal_to_render[1::2] = signal_integer_l
    nframes = total_samples_sample
    comptype = "NONE"
    compname = "not compressed"
    nchannels = 2
    sampwidth = 2
    wav_file_write = wave.open('D:\\321\\222.wav', 'w')
    wav_file_write.setparams((nchannels, sampwidth, int(frame_rate), nframes, comptype, compname))
    wav_file_write.writeframes(signal_to_render.tobytes())  # write all interleaved samples at once
    wav_file_write.close()
    print("Done")

if __name__ == "__main__":
    main()
Give it a try.
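Before running the whole script, a quick sanity check that the GPU path actually works is worthwhile. This is a minimal sketch, assuming a recent CuPy release that ships cupyx.scipy.signal.lfilter; if your CuPy is older, there is no GPU lfilter and the filter calls above would have to stay on the CPU:

import cupy as cp
import cupyx.scipy.signal as cpx_signal  # assumption: lfilter is available in this CuPy version

# Report the GPU CuPy is using (should show the RTX 3060)
print(cp.cuda.runtime.getDeviceProperties(0)['name'])

# Run a tiny IIR filter on the device and confirm the result stays a CuPy array
x = cp.random.standard_normal(1_000_000)
b = cp.array([1.0, 0.5])
a = cp.array([1.0, -0.3])
y = cpx_signal.lfilter(b, a, x)
print(type(y))  # <class 'cupy.ndarray'> means the filtering ran on the GPU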


Addendum (2024-8-1 08:48):
Remember to delete the stray </strike></i></i></i></i></i></i></i></i> tags the forum tacked onto the end.
Author: 没事瞎琢磨    Time: 2024-8-1 09:29
Mail me the 3060, this needs hands-on debugging.
Author: 冷渣渣    Time: 2024-8-1 09:36
Your code doesn't support the 3060, but my card supports it perfectly. Ship the 3060 to me, add a little cash, and I'll swap you my 1050 Ti. Mine has the 'Ti', you see.
Author: 音鬼    Time: 2024-8-2 03:10
oncall12 posted on 2024-8-1 08:46
import cupy as cp  # CuPy in place of NumPy so the arrays live on the GPU
import cupyx.scipy.signal as signal  # GPU lfilter; assumes a recent CuPy release that provides it
impo ...

early_reflections_r += delay(sample[0], delays_early<i>, gains_early<i>)[:sample[0].size                                                      
SyntaxError: invalid syntax

It throws an error.
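The SyntaxError most likely comes from the forum's rich-text rendering: when the code was copied, the [i] loop indices were turned into stray <i> tags (which is what the earlier addendum about deleting the trailing tags refers to), and the closing bracket of the slice was lost with them. The loop should read exactly as in the listing above:

    for i in range(10):
        early_reflections_r += delay(sample[0], delays_early[i], gains_early[i])[:sample[0].size]
        early_reflections_l += delay(sample[1], delays_early[i], gains_early[i])[:sample[1].size]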
Author: 我的yyy123    Time: 2024-8-9 18:02
It doesn't run.
Author: 1154890383    Time: 2024-10-1 11:45
import numpy as np
import scipy.signal as signal
import wave
import struct
import matplotlib.pyplot as plt
import numba

@numba.njit(parallel=True)
def allpass(input_signal, delay, gain):
    B = np.zeros(delay)
    B[0] = gain
    B[delay - 1] = 1
    A = np.zeros(delay)
    A[0] = 1
    A[delay - 1] = gain
    output_signal = np.zeros(input_signal.shape)
    output_signal = signal.lfilter(B, A, input_signal)
    return output_signal

@numba.njit(parallel=True)
def comb(input_signal, delay, gain):
    B = np.zeros(delay)
    B[delay - 1] = 1
    A = np.zeros(delay)
    A[0] = 1
    A[delay - 1] = -gain
    output_signal = np.zeros(input_signal.shape)
    output_signal = signal.lfilter(B, A, input_signal)
    return output_signal

@numba.njit(parallel=True)
def comb_with_lp(input_signal, delay, g, g1):
    g2 = g * (1 - g1)
    B = np.zeros(delay + 1)
    B[delay - 1] = 1
    B[delay] = -g1
    A = np.zeros(delay)
    A[0] = 1
    A[1] = -g1
    A[delay - 1] = -g2
    output_signal = np.zeros(input_signal.shape)
    output_signal = signal.lfilter(B, A, input_signal)
    return output_signal

@numba.njit(parallel=True)
def delay(input_signal, delay, gain=1):
    output_signal = np.concatenate((np.zeros(delay), input_signal))
    output_signal = output_signal * gain
    return output_signal


def main():
    # OPENING / GENERATING TEST WAV SIGNAL
    # KRONECKER DELTA
    # sample = np.zeros((2,88200))
    # sample[:,0] = 1
    # WAV FILE
    sample_in = 'D:\\321\\555.wav'
    frame_rate = 44100.0
    wav_file = wave.open(sample_in, 'r')
    num_samples_sample = wav_file.getnframes()
    num_channels_sample = wav_file.getnchannels()
    sample = wav_file.readframes(num_samples_sample)
    total_samples_sample = num_samples_sample * num_channels_sample
    wav_file.close()

    sample = struct.unpack('{n}h'.format(n=total_samples_sample), sample)
    sample = np.array([sample[0::2], sample[1::2]], dtype=np.float64)
    sample[0] /= np.max(np.abs(sample[0]), axis=0)
    sample[1] /= np.max(np.abs(sample[1]), axis=0)

    # INITIALIZATION OF ALGORITHM'S VARIABLES
    stereospread = 23  # stereo spread, in samples
    delays_r = [605, 969, 1356, 1562, 1785, 2100, 2600, 3100, 3581, 4025]  # comb delays, R channel
    delays_l = [d + stereospread for d in delays_r]  # comb delays, L channel
    delays_early = [477, 821, 1161, 1511, 1721, 1951, 2310, 2542, 2751, 2952]  # early-reflection delays
    gains_early = [0.87, 0.818, 0.635, 0.719, 0.267, 0.242, 0.192, 0.172, 0.151, 0.111]  # early-reflection gains
    g1_list = [0.21, 0.23, 0.25, 0.27, 0.28, 0.25, 0.21, 0.19, 0.17, 0.14]  # low-pass coefficients
    g = 0.9  # comb feedback gain
    rev_to_er_delay = 2000
    allpass_delay = 186
    allpass_g = 0.7
    output_gain = 0.075
    dry = 1
    wet = 1
    width = 1
    wet1 = wet * (width / 2 + 0.5)
    wet2 = wet * ((1 - width) / 2)
    early_reflections_r = np.zeros(sample[0].size)
    early_reflections_l = np.zeros(sample[1].size)
    combs_out_r = np.zeros(sample[0].size)
    combs_out_l = np.zeros(sample[1].size)

    # ALGORITHM'S MAIN PART
    for i in numba.prange(10):
        early_reflections_r = early_reflections_r + delay(sample[0], delays_early[i], gains_early[i])[:sample[0].size]
        early_reflections_l = early_reflections_l + delay(sample[1], delays_early[i], gains_early[i])[:sample[1].size]
    for i in numba.prange(6):
        combs_out_r = combs_out_r + comb_with_lp(sample[0], delays_r[i], g, g1_list[i])
        combs_out_l = combs_out_l + comb_with_lp(sample[1], delays_l[i], g, g1_list[i])
    reverb_r = allpass(combs_out_r, allpass_delay, allpass_g)
    reverb_l = allpass(combs_out_l, allpass_delay, allpass_g)
    early_reflections_r = np.concatenate((early_reflections_r, np.zeros(rev_to_er_delay)))
    early_reflections_l = np.concatenate((early_reflections_l, np.zeros(rev_to_er_delay)))
    reverb_r = delay(reverb_r, rev_to_er_delay)
    reverb_l = delay(reverb_l, rev_to_er_delay)
    reverb_out_r = early_reflections_r + reverb_r
    reverb_out_l = early_reflections_l + reverb_l
    reverb_out_r = output_gain * ((reverb_out_r * wet1 + reverb_out_l * wet2) + np.concatenate(
        (sample[0], np.zeros(rev_to_er_delay))) * dry)
    reverb_out_l = output_gain * ((reverb_out_l * wet1 + reverb_out_r * wet2) + np.concatenate(
        (sample[1], np.zeros(rev_to_er_delay))) * dry)

    # WRITING TO FILE
    signal_integer_r = (reverb_out_r * int(np.iinfo(np.int16).max)).astype(np.int16)
    signal_integer_l = (reverb_out_l * int(np.iinfo(np.int16).max)).astype(np.int16)
    signal_to_render = np.empty((signal_integer_r.size + signal_integer_l.size), dtype=np.int16)
    signal_to_render[0::2] = signal_integer_r
    signal_to_render[1::2] = signal_integer_l
    nframes = total_samples_sample
    comptype = "NONE"
    compname = "not compressed"
    nchannels = 2
    sampwidth = 2
    wav_file_write = wave.open('D:\\321\\222.wav', 'w')
    wav_file_write.setparams((nchannels, sampwidth, int(frame_rate), nframes, comptype, compname))
    for s in range(nframes):
        wav_file_write.writeframes(struct.pack('h', signal_to_render[s]))
    wav_file_write.close()
    print("Done")


if __name__ == "__main__":
    main()
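One caveat with this version: numba.njit(parallel=True) compiles in nopython mode, which cannot lower calls into scipy.signal.lfilter, so the decorated filter functions will raise a typing error the first time they are called, and numba.prange only parallelizes inside a jitted function. To actually benefit from Numba, the recursive filters would have to be written as explicit loops. A minimal sketch of the comb section done that way (comb_njit is a hypothetical helper, not part of the attachment):

import numpy as np
import numba

@numba.njit(cache=True)
def comb_njit(x, delay, gain):
    # Same difference equation the lfilter-based comb implements:
    #   y[n] = x[n - (delay-1)] + gain * y[n - (delay-1)]
    y = np.zeros_like(x)
    d = delay - 1
    for n in range(x.size):
        xin = x[n - d] if n >= d else 0.0
        yfb = y[n - d] if n >= d else 0.0
        y[n] = xin + gain * yfb
    return y

The comb-with-low-pass and all-pass sections can be unrolled the same way. Because of the feedback, each filter is inherently sequential; any parallelism would come from running the independent delay lines separately, not from parallel=True inside one filter.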




Welcome to 精易论坛 (https://125.confly.eu.org/) Powered by Discuz! X3.4