@article{10.3389/fnins.2015.00347,
  author   = {Zai, Anja T. and Bhargava, Saurabh and Mesgarani, Nima and Liu, Shih-Chii},
  title    = {Reconstruction of Audio Waveforms from Spike Trains of Artificial Cochlea Models},
  journal  = {Frontiers in Neuroscience},
  volume   = {9},
  pages    = {347},
  year     = {2015},
  issn     = {1662-453X},
  doi      = {10.3389/fnins.2015.00347},
  url      = {https://www.frontiersin.org/articles/10.3389/fnins.2015.00347},
  abstract = {Spiking cochlea models describe the analog processing and spike generation process within the biological cochlea. Reconstructing the audio input from the artificial cochlea spikes is therefore useful for understanding the fidelity of the information preserved in the spikes. The reconstruction process is challenging particularly for spikes from the mixed signal (analog/digital) integrated circuit (IC) cochleas because of multiple non-linearities in the model and the additional variance caused by random transistor mismatch. This work proposes an offline method for reconstructing the audio input from spike responses of both a particular spike-based hardware model called the AEREAR2 cochlea and an equivalent software cochlea model. This method was previously used to reconstruct the auditory stimulus based on the peri-stimulus histogram of spike responses recorded in the ferret auditory cortex. The reconstructed audio from the hardware cochlea is evaluated against an analogous software model using objective measures of speech quality and intelligibility; and further tested in a word recognition task. The reconstructed audio under low signal-to-noise (SNR) conditions (SNR $< -5$~dB) gives a better classification performance than the original SNR input in this word recognition task.},
}