HISE Logo Forum
    • Categories
    • Register
    • Login
    1. HISE
    2. huen97
    • Profile
    • Following 0
    • Followers 0
    • Topics 11
    • Posts 24
    • Groups 0

    huen97

    @huen97

    Hi

    0
    Reputation
    26
    Profile views
    24
    Posts
    0
    Followers
    0
    Following
    Joined
    Last Online
    Age 27
    Location HongKong

    huen97 Unfollow Follow

    Best posts made by huen97

    • HERE IS MY 3D KNOB DESIGN [FREE DOWNLOAD]

      https://www.dropbox.com/s/i5xwfj0rubbr4m8/Maxwell Wong Knob File.zip?dl=0

      Maxwell_Wong_Knob.png

      posted in General Questions
      huen97H
      huen97
    • I wasted 3 hours on deepseek trying to create Autotune, Reverb and Delay in Hise

      The script compiled successfully, so I assumed everything was okay, but when I tested it, I found that it wasn't working.

      This is the autotune script:

      Content.makeFrontInterface(500, 300);
      
      // ===== 1. Scale system =====
      // Scale data stored as an object; each array holds semitone offsets from the root
      const var Scales = {
          "Chromatic": [0,1,2,3,4,5,6,7,8,9,10,11], // chromatic scale
          "Major":     [0,2,4,5,7,9,11],             // major scale
          "Minor":     [0,2,3,5,7,8,10]              // natural minor
      };
      
      // current scale state
      reg currentScale = Scales.Chromatic;
      reg rootNote = 9; // default root note A
      
      // ===== 2. Control creation =====
      const var scaleCombo = Content.addComboBox("scaleCombo", 50, 30);
      scaleCombo.set("items", "Chromatic, Major, Minor");  // note the space after each comma
      scaleCombo.set("defaultValue", 0);
      scaleCombo.set("tooltip", "Select musical scale");
      scaleCombo.set("height", 24);  // explicitly set the height
      
      // ===== 2. Root note combo box =====
      // NOTE(review): HISE combo box values are typically 1-based, so a
      // defaultValue of 9 may not select "A" (index 9 in a 0-based list) — confirm.
      const var rootCombo = Content.addComboBox("rootCombo", 200, 30);
      rootCombo.set("items", "C, C#, D, D#, E, F, F#, G, G#, A, A#, B");  // space after each comma
      rootCombo.set("defaultValue", 9);
      rootCombo.set("tooltip", "Select root note");
      rootCombo.set("height", 24);
      
      // ===== 3. Correction strength knob =====
      const var strengthKnob = Content.addKnob("strength", 350, 30);
      // NOTE(review): the trailing comma after "height" may be rejected by a
      // strict JSON parser — verify setPropertiesFromJSON accepts it.
      Content.setPropertiesFromJSON("strength", {
          "min": 0,
          "max": 100,
          "stepSize": 1,
          "defaultValue": 80,
          "text": "Strength",
          "tooltip": "Pitch correction strength",
          "height": 30,
      });
      
      // Quantizes a detected pitch (Hz) to the nearest note of the current
      // scale, relative to A440, using the module-level currentScale/rootNote.
      // @param pitchInHz  detected fundamental frequency in Hz (must be > 0)
      // @returns          the quantized frequency in Hz
      function getQuantizedPitch(pitchInHz)
      {
          // Semitone offset from A440 via natural log (no Math.log2 available):
          // semitone = 12 * (log(pitch/440) / log(2))
          var semitone = 12 * (Math.log(pitchInHz / 440.0) / 0.69314718056); // 0.69314718056 ~= ln(2)
          
          // round to the nearest whole semitone
          semitone = Math.round(semitone);
          
          // position inside the scale (wrap negative remainders back into range)
          var scaleDegree = (semitone - rootNote) % currentScale.length;
          if (scaleDegree < 0) scaleDegree += currentScale.length;
          
          // Convert back to Hz. Fix: the original called an undefined `Exp()`;
          // Math.exp() is the correct function.
          return 440.0 * Math.exp((rootNote + currentScale[scaleDegree]) * 0.05776226505); // 0.05776226505 ~= ln(2)/12
      }
      
      // ===== 4. Control callbacks =====
      // NOTE(review): .get("text") reads the component's "text" property (its
      // label), not the currently selected item — this lookup likely never
      // matches a Scales key; confirm against the HISE ScriptComboBox API.
      inline function onScaleComboControl(component, value)
      {
          // access via the component name (avoids passing the control object directly)
          currentScale = Scales[scaleCombo.get("text")];
      }
      
      // bind the callback (using an inline function)
      scaleCombo.setControlCallback(onScaleComboControl);
      
      // ===== 2. Root note selection callback =====
      // NOTE(review): combo box values are typically 1-based in HISE, while
      // rootNote is used as a 0-based semitone offset — confirm the mapping.
      inline function onRootComboControl(component, value)
      {
          rootNote = value; // use the value parameter directly
      }
      
      rootCombo.setControlCallback(onRootComboControl);
      
      // ===== 5. Audio processing block =====
      // Applies pitch "correction" sample-by-sample on channel 0 and copies
      // the result to channel 1 when present.
      // NOTE(review): a second processBlock definition later in this script
      // overrides this one — remove one of the two.
      function processBlock(data, numChannels, numSamples)
      {
          var strength = strengthKnob.getValue() / 100; // 0..1 wet amount
          
          // mono processing on channel 0
          for (var s = 0; s < numSamples; s++)
          {
              var inSample = data[0][s];
              
              // Pitch detection. Fix: the original called an undefined
              // `pitchDetector` object; use the detectPitch() helper that
              // this script defines below instead.
              var detectedPitch = detectPitch(inSample);
              
              if (detectedPitch > 0)
              {
                  // quantize the detected pitch to the current scale
                  var targetPitch = getQuantizedPitch(detectedPitch);
                  
                  // Naive "correction" by amplitude blending — this does NOT
                  // actually shift pitch; a real implementation needs a pitch
                  // shifter (e.g. Engine.createPitchShifter in HISE).
                  var pitchRatio = targetPitch / detectedPitch;
                  data[0][s] = inSample * (1 - strength) + (inSample * pitchRatio) * strength;
                  
                  // duplicate to the right channel for stereo output
                  if (numChannels > 1) data[1][s] = data[0][s];
              }
          }
      }
      
      // ===== 6. Initialisation =====
      const var fftSize = 1024; // analysis window size in samples
      reg fftBuffer = [];       // sample buffer for pitch detection
      reg fftPosition = 0;      // current write position in the buffer
      reg lastPitch = 440;      // last accepted pitch estimate in Hz
      
      // zero-fill the analysis buffer
      for (var i = 0; i < fftSize; i++) {
          fftBuffer[i] = 0;
      }
      
      // ===== 2. Simple pitch detection (time-domain autocorrelation) =====
      // Accumulates incoming samples into fftBuffer; each time the window
      // fills up, searches for the lag with the strongest autocorrelation and
      // caches the corresponding frequency in lastPitch.
      // Returns the most recent accepted pitch estimate in Hz.
      function detectPitch(sample) {
          // write the sample into the circular analysis window
          fftBuffer[fftPosition] = sample;
          fftPosition = (fftPosition + 1) % fftSize;
          
          // run the detector only when the window has just wrapped around
          if (fftPosition == 0) {
              var bestScore = -1;
              var candidate = 0;
              
              for (var offset = 40; offset < fftSize / 2; offset++) { // 40 samples ~= 100 Hz lower bound
                  var score = 0;
                  for (var j = 0; j < fftSize - offset; j++)
                      score += fftBuffer[j] * fftBuffer[j + offset];
                  
                  if (score > bestScore) {
                      bestScore = score;
                      candidate = Engine.getSampleRate() / offset;
                  }
              }
              
              // only accept estimates inside a plausible vocal range (80 Hz - 1 kHz)
              if (candidate > 80 && candidate < 1000)
                  lastPitch = candidate;
          }
          
          return lastPitch;
      }
      
      // ===== 3. Audio processing block =====
      // NOTE(review): this redefines processBlock and overrides the version
      // above; it only detects pitch and never applies any correction.
      // Fix: the original for-loop had no braces, so detectPitch() ran only
      // once per block (after the loop, on the last sample) instead of once
      // per sample.
      function processBlock(data, numChannels, numSamples) {
          for (var s = 0; s < numSamples; s++)
          {
              var inSample = data[0][s];
              
              // pitch detection (replaces the undefined pitchDetector)
              var currentPitch = detectPitch(inSample);
          }
      }
      
      
      

      This is the reverb & delay script:

      // ===== 1. Parameter definition =====
      // Dry/wet mix, reverb size and delay feedback knobs.
      // NOTE(review): confirm that knobs created via Content.addKnob expose
      // setRange() in this HISE version; otherwise use set("min"/"max"/"stepSize").
      const var mixKnob = Content.addKnob("Mix", 0, 0);
      mixKnob.setRange(0, 1, 0.01);
      mixKnob.set("text", "Dry/Wet");
      mixKnob.set("defaultValue", 0.5);
      
      const var sizeKnob = Content.addKnob("Size", 200, 0);
      sizeKnob.setRange(0.1, 10, 0.1);
      sizeKnob.set("text", "Reverb Size");
      sizeKnob.set("defaultValue", 3);
      
      const var feedbackKnob = Content.addKnob("Feedback", 400, 0);
      feedbackKnob.setRange(0, 0.95, 0.01);
      feedbackKnob.set("text", "Delay Feedback");
      feedbackKnob.set("defaultValue", 0.5);
      
      // ===== 2. 延迟线实现 =====
      reg delayBufferL = []; // 左声道延迟缓冲区
      reg delayBufferR = []; // 右声道延迟缓冲区
      reg delayPos = 0;
      const delayMaxSamples = 44100; // 1秒缓冲区
      
      // 初始化缓冲区
      for (i = 0; i < delayMaxSamples; i++) {
          delayBufferL[i] = 0;
          delayBufferR[i] = 0;
      }
      
      // ===== 3. 混响参数 =====
      reg reverbBufferL = [];
      reg reverbBufferR = [];
      reg reverbPos = 0;
      const reverbTime = 0.2 * Engine.getSampleRate(); // 200ms混响
      
      for (i = 0; i < reverbTime; i++) {
          reverbBufferL[i] = 0;
          reverbBufferR[i] = 0;
      }
      
      // ===== 4. Audio processing =====
      // Per-sample delay plus a cheap "reverb" (a second, shorter delay
      // line), mixed with the dry signal by the Mix knob.
      // Fix: the original for-loop had no braces, so only the first statement
      // ran per sample; the rest of the chain executed once per block with
      // s == numSamples, reading past the end of the channel arrays.
      function processBlock(data, numChannels, numSamples)
      {
          for (var s = 0; s < numSamples; s++)
          {
              // original signal (guard against missing channels)
              var inL = numChannels > 0 ? data[0][s] : 0;
              var inR = numChannels > 1 ? data[1][s] : inL;
              
              // === delay ===
              var delayedL = delayBufferL[delayPos];
              var delayedR = delayBufferR[delayPos];
              
              // write the new signal into the delay line (with feedback)
              delayBufferL[delayPos] = inL + delayedL * feedbackKnob.getValue();
              delayBufferR[delayPos] = inR + delayedR * feedbackKnob.getValue();
              
              // === reverb ===
              var revL = reverbBufferL[reverbPos] * 0.6;
              var revR = reverbBufferR[reverbPos] * 0.6;
              
              // write the new reverb signal
              reverbBufferL[reverbPos] = inL + revL * sizeKnob.getValue();
              reverbBufferR[reverbPos] = inR + revR * sizeKnob.getValue();
              
              // === mix output ===
              var wetL = (delayedL + revL) * mixKnob.getValue();
              var wetR = (delayedR + revR) * mixKnob.getValue();
              
              // write to the output channels (only those that exist)
              if (numChannels > 0) data[0][s] = inL * (1 - mixKnob.getValue()) + wetL;
              if (numChannels > 1) data[1][s] = inR * (1 - mixKnob.getValue()) + wetR;
              
              // advance the circular buffer positions
              delayPos = (delayPos + 1) % delayMaxSamples;
              reverbPos = (reverbPos + 1) % reverbTime;
          }
      }
      
      posted in Scripting
      huen97H
      huen97

    Latest posts made by huen97

    • I wasted 3 hours on deepseek trying to create Autotune, Reverb and Delay in Hise

      The script compiled successfully, so I assumed everything was okay, but when I tested it, I found that it wasn't working.

      This is the autotune script:

      Content.makeFrontInterface(500, 300);
      
      // ===== 1. Scale system =====
      // Scale data stored as an object; each array holds semitone offsets from the root
      const var Scales = {
          "Chromatic": [0,1,2,3,4,5,6,7,8,9,10,11], // chromatic scale
          "Major":     [0,2,4,5,7,9,11],             // major scale
          "Minor":     [0,2,3,5,7,8,10]              // natural minor
      };
      
      // current scale state
      reg currentScale = Scales.Chromatic;
      reg rootNote = 9; // default root note A
      
      // ===== 2. Control creation =====
      const var scaleCombo = Content.addComboBox("scaleCombo", 50, 30);
      scaleCombo.set("items", "Chromatic, Major, Minor");  // note the space after each comma
      scaleCombo.set("defaultValue", 0);
      scaleCombo.set("tooltip", "Select musical scale");
      scaleCombo.set("height", 24);  // explicitly set the height
      
      // ===== 2. Root note combo box =====
      // NOTE(review): HISE combo box values are typically 1-based, so a
      // defaultValue of 9 may not select "A" (index 9 in a 0-based list) — confirm.
      const var rootCombo = Content.addComboBox("rootCombo", 200, 30);
      rootCombo.set("items", "C, C#, D, D#, E, F, F#, G, G#, A, A#, B");  // space after each comma
      rootCombo.set("defaultValue", 9);
      rootCombo.set("tooltip", "Select root note");
      rootCombo.set("height", 24);
      
      // ===== 3. Correction strength knob =====
      const var strengthKnob = Content.addKnob("strength", 350, 30);
      // NOTE(review): the trailing comma after "height" may be rejected by a
      // strict JSON parser — verify setPropertiesFromJSON accepts it.
      Content.setPropertiesFromJSON("strength", {
          "min": 0,
          "max": 100,
          "stepSize": 1,
          "defaultValue": 80,
          "text": "Strength",
          "tooltip": "Pitch correction strength",
          "height": 30,
      });
      
      // Quantizes a detected pitch (Hz) to the nearest note of the current
      // scale, relative to A440, using the module-level currentScale/rootNote.
      // @param pitchInHz  detected fundamental frequency in Hz (must be > 0)
      // @returns          the quantized frequency in Hz
      function getQuantizedPitch(pitchInHz)
      {
          // Semitone offset from A440 via natural log (no Math.log2 available):
          // semitone = 12 * (log(pitch/440) / log(2))
          var semitone = 12 * (Math.log(pitchInHz / 440.0) / 0.69314718056); // 0.69314718056 ~= ln(2)
          
          // round to the nearest whole semitone
          semitone = Math.round(semitone);
          
          // position inside the scale (wrap negative remainders back into range)
          var scaleDegree = (semitone - rootNote) % currentScale.length;
          if (scaleDegree < 0) scaleDegree += currentScale.length;
          
          // Convert back to Hz. Fix: the original called an undefined `Exp()`;
          // Math.exp() is the correct function.
          return 440.0 * Math.exp((rootNote + currentScale[scaleDegree]) * 0.05776226505); // 0.05776226505 ~= ln(2)/12
      }
      
      // ===== 4. Control callbacks =====
      // NOTE(review): .get("text") reads the component's "text" property (its
      // label), not the currently selected item — this lookup likely never
      // matches a Scales key; confirm against the HISE ScriptComboBox API.
      inline function onScaleComboControl(component, value)
      {
          // access via the component name (avoids passing the control object directly)
          currentScale = Scales[scaleCombo.get("text")];
      }
      
      // bind the callback (using an inline function)
      scaleCombo.setControlCallback(onScaleComboControl);
      
      // ===== 2. Root note selection callback =====
      // NOTE(review): combo box values are typically 1-based in HISE, while
      // rootNote is used as a 0-based semitone offset — confirm the mapping.
      inline function onRootComboControl(component, value)
      {
          rootNote = value; // use the value parameter directly
      }
      
      rootCombo.setControlCallback(onRootComboControl);
      
      // ===== 5. Audio processing block =====
      // Applies pitch "correction" sample-by-sample on channel 0 and copies
      // the result to channel 1 when present.
      // NOTE(review): a second processBlock definition later in this script
      // overrides this one — remove one of the two.
      function processBlock(data, numChannels, numSamples)
      {
          var strength = strengthKnob.getValue() / 100; // 0..1 wet amount
          
          // mono processing on channel 0
          for (var s = 0; s < numSamples; s++)
          {
              var inSample = data[0][s];
              
              // Pitch detection. Fix: the original called an undefined
              // `pitchDetector` object; use the detectPitch() helper that
              // this script defines below instead.
              var detectedPitch = detectPitch(inSample);
              
              if (detectedPitch > 0)
              {
                  // quantize the detected pitch to the current scale
                  var targetPitch = getQuantizedPitch(detectedPitch);
                  
                  // Naive "correction" by amplitude blending — this does NOT
                  // actually shift pitch; a real implementation needs a pitch
                  // shifter (e.g. Engine.createPitchShifter in HISE).
                  var pitchRatio = targetPitch / detectedPitch;
                  data[0][s] = inSample * (1 - strength) + (inSample * pitchRatio) * strength;
                  
                  // duplicate to the right channel for stereo output
                  if (numChannels > 1) data[1][s] = data[0][s];
              }
          }
      }
      
      // ===== 6. Initialisation =====
      const var fftSize = 1024; // analysis window size in samples
      reg fftBuffer = [];       // sample buffer for pitch detection
      reg fftPosition = 0;      // current write position in the buffer
      reg lastPitch = 440;      // last accepted pitch estimate in Hz
      
      // zero-fill the analysis buffer
      for (var i = 0; i < fftSize; i++) {
          fftBuffer[i] = 0;
      }
      
      // ===== 2. Simple pitch detection (time-domain autocorrelation) =====
      // Accumulates incoming samples into fftBuffer; each time the window
      // fills up, searches for the lag with the strongest autocorrelation and
      // caches the corresponding frequency in lastPitch.
      // Returns the most recent accepted pitch estimate in Hz.
      function detectPitch(sample) {
          // write the sample into the circular analysis window
          fftBuffer[fftPosition] = sample;
          fftPosition = (fftPosition + 1) % fftSize;
          
          // run the detector only when the window has just wrapped around
          if (fftPosition == 0) {
              var bestScore = -1;
              var candidate = 0;
              
              for (var offset = 40; offset < fftSize / 2; offset++) { // 40 samples ~= 100 Hz lower bound
                  var score = 0;
                  for (var j = 0; j < fftSize - offset; j++)
                      score += fftBuffer[j] * fftBuffer[j + offset];
                  
                  if (score > bestScore) {
                      bestScore = score;
                      candidate = Engine.getSampleRate() / offset;
                  }
              }
              
              // only accept estimates inside a plausible vocal range (80 Hz - 1 kHz)
              if (candidate > 80 && candidate < 1000)
                  lastPitch = candidate;
          }
          
          return lastPitch;
      }
      
      // ===== 3. Audio processing block =====
      // NOTE(review): this redefines processBlock and overrides the version
      // above; it only detects pitch and never applies any correction.
      // Fix: the original for-loop had no braces, so detectPitch() ran only
      // once per block (after the loop, on the last sample) instead of once
      // per sample.
      function processBlock(data, numChannels, numSamples) {
          for (var s = 0; s < numSamples; s++)
          {
              var inSample = data[0][s];
              
              // pitch detection (replaces the undefined pitchDetector)
              var currentPitch = detectPitch(inSample);
          }
      }
      
      
      

      This is the reverb & delay script:

      // ===== 1. Parameter definition =====
      // Dry/wet mix, reverb size and delay feedback knobs.
      // NOTE(review): confirm that knobs created via Content.addKnob expose
      // setRange() in this HISE version; otherwise use set("min"/"max"/"stepSize").
      const var mixKnob = Content.addKnob("Mix", 0, 0);
      mixKnob.setRange(0, 1, 0.01);
      mixKnob.set("text", "Dry/Wet");
      mixKnob.set("defaultValue", 0.5);
      
      const var sizeKnob = Content.addKnob("Size", 200, 0);
      sizeKnob.setRange(0.1, 10, 0.1);
      sizeKnob.set("text", "Reverb Size");
      sizeKnob.set("defaultValue", 3);
      
      const var feedbackKnob = Content.addKnob("Feedback", 400, 0);
      feedbackKnob.setRange(0, 0.95, 0.01);
      feedbackKnob.set("text", "Delay Feedback");
      feedbackKnob.set("defaultValue", 0.5);
      
      // ===== 2. 延迟线实现 =====
      reg delayBufferL = []; // 左声道延迟缓冲区
      reg delayBufferR = []; // 右声道延迟缓冲区
      reg delayPos = 0;
      const delayMaxSamples = 44100; // 1秒缓冲区
      
      // 初始化缓冲区
      for (i = 0; i < delayMaxSamples; i++) {
          delayBufferL[i] = 0;
          delayBufferR[i] = 0;
      }
      
      // ===== 3. 混响参数 =====
      reg reverbBufferL = [];
      reg reverbBufferR = [];
      reg reverbPos = 0;
      const reverbTime = 0.2 * Engine.getSampleRate(); // 200ms混响
      
      for (i = 0; i < reverbTime; i++) {
          reverbBufferL[i] = 0;
          reverbBufferR[i] = 0;
      }
      
      // ===== 4. Audio processing =====
      // Per-sample delay plus a cheap "reverb" (a second, shorter delay
      // line), mixed with the dry signal by the Mix knob.
      // Fix: the original for-loop had no braces, so only the first statement
      // ran per sample; the rest of the chain executed once per block with
      // s == numSamples, reading past the end of the channel arrays.
      function processBlock(data, numChannels, numSamples)
      {
          for (var s = 0; s < numSamples; s++)
          {
              // original signal (guard against missing channels)
              var inL = numChannels > 0 ? data[0][s] : 0;
              var inR = numChannels > 1 ? data[1][s] : inL;
              
              // === delay ===
              var delayedL = delayBufferL[delayPos];
              var delayedR = delayBufferR[delayPos];
              
              // write the new signal into the delay line (with feedback)
              delayBufferL[delayPos] = inL + delayedL * feedbackKnob.getValue();
              delayBufferR[delayPos] = inR + delayedR * feedbackKnob.getValue();
              
              // === reverb ===
              var revL = reverbBufferL[reverbPos] * 0.6;
              var revR = reverbBufferR[reverbPos] * 0.6;
              
              // write the new reverb signal
              reverbBufferL[reverbPos] = inL + revL * sizeKnob.getValue();
              reverbBufferR[reverbPos] = inR + revR * sizeKnob.getValue();
              
              // === mix output ===
              var wetL = (delayedL + revL) * mixKnob.getValue();
              var wetR = (delayedR + revR) * mixKnob.getValue();
              
              // write to the output channels (only those that exist)
              if (numChannels > 0) data[0][s] = inL * (1 - mixKnob.getValue()) + wetL;
              if (numChannels > 1) data[1][s] = inR * (1 - mixKnob.getValue()) + wetR;
              
              // advance the circular buffer positions
              delayPos = (delayPos + 1) % delayMaxSamples;
              reverbPos = (reverbPos + 1) % reverbTime;
          }
      }
      
      posted in Scripting
      huen97H
      huen97
    • HI,I have a question, how to add JUCE license in Hise folder??

      How can I prove that I have bought a license?

      posted in General Questions
      huen97H
      huen97
    • Lottie animations loop?

      Does anyone have any ideas? Is there any article for reference?

      so exhausted......😖

      posted in General Questions
      huen97H
      huen97
    • RE: I have a questions , How to create bypass button on EQ effect?

      //Bypass0_EQ

      inline function onBypass0Control(component, value)
      {
      ParametriqEQ.setBypassed(ParametriqEQ.BandOffset * 0 + ParametriqEQ.Enabled, value);
      };

      Content.getComponent("Bypass0").setControlCallback(onBypass0Control);

      I don't know how...so now i am trying....

      posted in General Questions
      huen97H
      huen97
    • I have a questions , How to create bypass button on EQ effect?

      I want to create a button, separately for each of EQ bands 1-6...

      posted in General Questions
      huen97H
      huen97
    • RE: Engine.loadUserPreset | Load Preset From Button

      @d-healey why is this example not working for me 😖? I am trying to use the button to load the presets, but it fails.....

      posted in General Questions
      huen97H
      huen97
    • RE: I have some question about load presets

      What should I do if I want the combobox to load only a few presets, currently I have created two Samplers, I want this combobox to only load the presets of sampler2

      Sorry my bad English

      posted in General Questions
      huen97H
      huen97
    • RE: I have some question about load presets

      1234.png

      posted in General Questions
      huen97H
      huen97
    • some questions about Licensing

      Deleted.

      posted in General Questions
      huen97H
      huen97
    • How to get hise commercial license?

      How much does it cost? :grinning_squinting_face:

      @Christoph-Hart

      posted in General Questions
      huen97H
      huen97