/* Edge Impulse Arduino examples
* Copyright (c) 2022 EdgeImpulse Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// These sketches are tested with 2.0.4 ESP32 Arduino Core
// https://github.com/espressif/arduino-esp32/releases/tag/2.0.4
// If your target is limited in memory remove this macro to save 10K RAM
#define EIDSP_QUANTIZE_FILTERBANK 0
/*
** NOTE: If you run into a TFLite arena allocation issue.
**
** This may be due to dynamic memory fragmentation.
** Try defining "-DEI_CLASSIFIER_ALLOCATION_STATIC" in boards.local.txt (create
** if it doesn't exist) and copy this file to
** `<ARDUINO_CORE_INSTALL_PATH>/arduino/hardware/<mbed_core>/<core_version>/`.
**
** See
** (https://support.arduino.cc/hc/en-us/articles/360012076960-Where-are-the-installed-cores-located-)
** to find where Arduino installs cores on your machine.
**
** If the problem persists then there's not enough memory for this model and application.
*/
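/*
** For reference, a minimal boards.local.txt sketch (the build property name
** below is an assumption -- check your core's boards.txt for the exact key):
**
**   esp32.build.extra_flags=-DEI_CLASSIFIER_ALLOCATION_STATIC
*/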
/* Includes ---------------------------------------------------------------- */
#include <Project-name_inferencing.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
/** Audio buffers, pointers and selectors */
typedef struct
{
    int16_t *buffer;    // inference window (n_samples 16-bit samples)
    uint8_t buf_ready;  // set to 1 once a full window has been captured
    uint32_t buf_count; // write index into buffer
    uint32_t n_samples; // window length in samples
} inference_t;
static inference_t inference;
static signed short sampleBuffer[EI_CLASSIFIER_RAW_SAMPLE_COUNT];
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal
static bool record_status = true;
static TaskHandle_t xCaptureTaskHandle = NULL;
const int led = 21;
const int led_record = 8;
const int AUDIO_IN = 0; // use GPIO 0 as the analog audio input
/**
* @brief Arduino setup function
*/
void setup()
{
// put your setup code here, to run once:
Serial.begin(115200);
// comment out the below line to cancel the wait for USB connection (needed for native USB)
while (!Serial)
;
Serial.println("Edge Impulse Inferencing");
// summary of inferencing settings (from model_metadata.h)
ei_printf("Inferencing settings:\n");
ei_printf("\tInterval: ");
ei_printf_float((float)EI_CLASSIFIER_INTERVAL_MS);
ei_printf(" ms.\n");
ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));
ei_printf("\nStarting continious inference in 2 seconds...\n");
pinMode(led, OUTPUT);
pinMode(led_record, OUTPUT);
digitalWrite(led, 1);
ei_sleep(2000);
digitalWrite(led, 0);
if (microphone_inference_start(EI_CLASSIFIER_RAW_SAMPLE_COUNT) == false)
{
ei_printf("ERR: Could not allocate audio buffer (size %d), this could be due to the window length of your model\r\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT);
return;
}
ei_printf("Recording...\n");
xTaskNotifyGive(xCaptureTaskHandle);
}
/**
* @brief Arduino main function. Runs the inferencing loop.
*/
float max_probability = 0;
int max_probability_ix = 0;
void loop()
{
bool m = microphone_inference_record();
if (!m)
{
ei_printf("ERR: Failed to record audio...\n");
return;
}
signal_t signal;
signal.total_length = EI_CLASSIFIER_RAW_SAMPLE_COUNT;
signal.get_data = &microphone_audio_signal_get_data;
ei_impulse_result_t result = {0};
// run the impulse: DSP, neural network and anomaly detection (if enabled)
EI_IMPULSE_ERROR r = run_classifier(&signal, &result, debug_nn);
if (r != EI_IMPULSE_OK)
{
ei_printf("ERR: Failed to run classifier (%d)\n", r);
return;
}
// print the predictions
ei_printf("Predictions ");
ei_printf("(DSP: %d ms., Classification: %d ms., Anomaly: %d ms.)",
result.timing.dsp, result.timing.classification, result.timing.anomaly);
ei_printf(": \n");
// track the arg-max over all class probabilities
max_probability = 0;
max_probability_ix = 0;
for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++)
{
ei_printf(" %s: ", result.classification[ix].label);
ei_printf_float(result.classification[ix].value);
ei_printf("\n");
if (max_probability <= result.classification[ix].value)
{
max_probability = result.classification[ix].value;
max_probability_ix = ix;
}
}
if (strcmp(result.classification[max_probability_ix].label, "beihang") == 0)
{
ei_printf("北京航空航天大学\n");
digitalWrite(led, LOW);
}
else if (strcmp(result.classification[max_probability_ix].label, "shie") == 0)
{
ei_printf("士谔书院\n");
digitalWrite(led, HIGH);
}
else if (strcmp(result.classification[max_probability_ix].label, "hyn") == 0)
{
ei_printf("何玥凝\n");
digitalWrite(led, 0);
}
else if (strcmp(result.classification[max_probability_ix].label, "hsy") == 0)
{
ei_printf("黄诗莹\n");
digitalWrite(led, 0);
}
else {
ei_printf("No valid command detected\n");
digitalWrite(led, LOW);
}
#if EI_CLASSIFIER_HAS_ANOMALY == 1
ei_printf(" anomaly score: ");
ei_printf_float(result.anomaly);
ei_printf("\n");
#endif
xTaskNotifyGive(xCaptureTaskHandle);
}
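/**
 * @brief Copy one captured window into the inference buffer and flag it ready
 * @param [in] n_samples Number of samples to copy out of sampleBuffer
 */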
static void audio_inference_callback(uint32_t n_samples)
{
ei_printf("DATA ");
for (uint32_t i = 0; i < n_samples; i++)
{
inference.buffer[inference.buf_count++] = sampleBuffer[i];
if (inference.buf_count >= inference.n_samples)
{
inference.buf_count = 0;
inference.buf_ready = 1;
}
// debug: dump every sample over serial (gate on i, e.g. i > 8000, to trim output)
ei_printf("%d ", sampleBuffer[i]);
}
ei_printf("\n");
}
const byte sample_interval = 125; // sampling interval of 125 us, i.e. an 8 kHz sample rate
const byte scale = 4;             // amplitude scaling factor
const int thresh = 400;           // trigger threshold after DC-offset removal
int adcValue = 0;
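/**
 * @brief Block until the input crosses the trigger threshold (or 2 s elapse),
 *        then capture EI_CLASSIFIER_RAW_SAMPLE_COUNT samples at ~8 kHz.
 *        Note this assumes the Edge Impulse project was trained at 8 kHz
 *        (EI_CLASSIFIER_FREQUENCY == 8000); if model_metadata.h reports a
 *        different rate, adjust sample_interval to match.
 * @return true if capture was triggered by the threshold, false on timeout
 */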
bool ADCfast(void)
{
static int zero = 580;
int curSample = 0;
static unsigned long prevTime;
long val = 0;
bool Collecting = false;
unsigned long triggerTimeout = millis(); // timeout reference for the trigger wait
while(!Collecting && (millis() - triggerTimeout < 2000)){
prevTime = micros();
adcValue = analogRead(AUDIO_IN); // read the analog signal
val = adcValue / scale; // scale down the ADC reading
// remove the DC offset: nudge the running baseline 'zero' one count toward the reading each sample
if (val < zero)
zero--;
else
zero++;
val = val - zero;
if (abs(val) > thresh){
Collecting = true;
break;
}
while (micros() - prevTime < sample_interval);
}
for (curSample = 0; curSample < EI_CLASSIFIER_RAW_SAMPLE_COUNT; curSample++)
{
prevTime = micros();
adcValue = analogRead(AUDIO_IN); // read the analog signal
val = adcValue / scale; // scale down the ADC reading
// remove the DC offset (same running baseline as in the trigger loop)
if (val < zero)
zero--;
else
zero++;
val = val - zero;
sampleBuffer[curSample] = val;
while (micros() - prevTime < sample_interval);
}
return Collecting;
}
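/**
 * @brief FreeRTOS task: wait for a go notification, record one window via
 *        ADCfast(), then copy it into the inference buffer.
 * @param [in] arg Window size in bytes (unused; the window is fixed)
 */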
static void capture_samples(void *arg)
{
(void)arg; // window size is fixed at EI_CLASSIFIER_RAW_SAMPLE_COUNT, so arg is unused
while (record_status)
{
ulTaskNotifyTake(pdTRUE, pdMS_TO_TICKS(5000));
ei_printf("----------------请说语音指令----------------\n");
digitalWrite(led_record, LOW);
ADCfast();
digitalWrite(led_record, HIGH);
if (record_status)
{
audio_inference_callback(EI_CLASSIFIER_RAW_SAMPLE_COUNT);
}
else
{
break;
}
vTaskDelay(pdMS_TO_TICKS(2000));
}
vTaskDelete(NULL);
}
/**
 * @brief Init inferencing struct and start the ADC capture task
 * @param [in] n_samples The number of samples in the inference window
 * @return true on success, false if the audio buffer could not be allocated
 */
static bool microphone_inference_start(uint32_t n_samples)
{
inference.buffer = (int16_t *)malloc(n_samples * sizeof(int16_t));
if (inference.buffer == NULL)
{
return false;
}
inference.buf_count = 0;
inference.n_samples = n_samples;
inference.buf_ready = 0;
ei_sleep(100);
record_status = true;
xTaskCreate(capture_samples, "CaptureSamples", 1024 * 32, (void *)(EI_CLASSIFIER_RAW_SAMPLE_COUNT * sizeof(sampleBuffer[0])), 10, &xCaptureTaskHandle);
return true;
}
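/**
 * @brief Wait until the capture task has filled a complete window
 * @return true when a window is ready
 */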
static bool microphone_inference_record(void)
{
bool ret = true;
while (inference.buf_ready == 0)
{
delay(10);
}
inference.buf_ready = 0;
return ret;
}
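/**
 * @brief Convert the raw int16 window to the float signal run_classifier() expects
 */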
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
numpy::int16_to_float(&inference.buffer[offset], out_ptr, length);
return 0;
}
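/**
 * @brief Stop the capture loop and release the audio buffer
 */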
static void microphone_inference_end(void)
{
record_status = false; // let the capture task exit before the buffer is freed
ei_free(inference.buffer);
}
#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif

With this code I need to read in outside audio through the microphone and recognize it, but in practice, for most human speech, what the serial monitor reports is almost always the label 何玥凝; the other labels never come out. It is already known that the model's accuracy is very high, reaching 97%.