MSE-Optimal Quantizer Design for Gaussian Input Source

In this article, we explore how to design an MSE-optimal quantizer for a Gaussian source.
An $N$-level quantizer maps the source $X \sim \mathcal{N}(\mu, \sigma^2)$ to one of $N$ reconstruction levels $\hat{x}_0, \dots, \hat{x}_{N-1}$, with decision boundaries $b_0 = -\infty < b_1 < \dots < b_{N-1} < b_N = \infty$. The goal is to choose the boundaries and levels that minimize the mean squared error
$$\mathrm{MSE} = \sum_{i=0}^{N-1}\int_{b_i}^{b_{i+1}} (x - \hat{x}_i)^2\, f_X(x)\, dx,$$
where $f_X$ is the Gaussian density.
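The Lloyd-Max algorithm implemented below alternates between the two standard necessary conditions for optimality (the symbols $b_i$ and $\hat{x}_i$ are my notation, chosen to match the `boundaries` and `quantLevels` arrays in the code): each interior boundary is the midpoint of its two neighbouring levels, and each level is the centroid of its region under the source density,
$$b_i = \frac{\hat{x}_{i-1} + \hat{x}_i}{2}, \qquad \hat{x}_i = \frac{\int_{b_i}^{b_{i+1}} x\, f_X(x)\, dx}{\int_{b_i}^{b_{i+1}} f_X(x)\, dx}.$$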
```python
# MSE Optimal Quantizer Design for Gaussian Source
import numpy as np
from scipy import integrate

numQuantLevels = 32

# Define the parameters of the Gaussian source
sigma = 4.0
mu = 10.0

# Define the boundaries of the quantization regions
boundaries = np.zeros(numQuantLevels + 1)
boundaries[0] = -np.inf
boundaries[-1] = np.inf
boundaries[1:-1] = np.linspace(-1.25 * sigma, 1.25 * sigma, numQuantLevels - 1)
boundaries += mu


def compute_quant_level(start, end):
    # Computes the centroid of the quantization region [start, end];
    # the infinite end regions are truncated for numerical integration
    if start == -np.inf:
        start = -100
    if end == np.inf:
        end = 100
    f_x = lambda x: x * (1 / np.sqrt(2 * np.pi * sigma**2)) * np.exp(-((x - mu)**2) / (2 * sigma**2))
    p_x = lambda x: (1 / np.sqrt(2 * np.pi * sigma**2)) * np.exp(-((x - mu)**2) / (2 * sigma**2))
    quant_level = integrate.quad(f_x, start, end)[0] / integrate.quad(p_x, start, end)[0]
    return quant_level


# Define the initial quantization levels (centroid of each initial region)
quantLevels = np.zeros(numQuantLevels)
for i in range(numQuantLevels):
    quantLevels[i] = compute_quant_level(boundaries[i], boundaries[i + 1])

# We have initial quantization levels and boundaries. Now, we run the Lloyd-Max
# algorithm to find the optimal boundaries and levels that minimize the MSE.

# Testing performance of quantization
numSamples = 500000
# Generate samples from the Gaussian source for testing
samples = np.random.normal(mu, sigma, numSamples)

numIters = 30
for iter_n in range(numIters):
    # Update each interior boundary to the midpoint of its neighbouring levels
    for i in range(1, numQuantLevels):
        boundaries[i] = (quantLevels[i - 1] + quantLevels[i]) / 2
    # Compute the quantization levels (centroids of the updated regions)
    for i in range(numQuantLevels):
        quantLevels[i] = compute_quant_level(boundaries[i], boundaries[i + 1])
    # Quantize the samples
    quantizedSamples = np.zeros(numSamples)
    for j in range(numQuantLevels):
        mask = (samples >= boundaries[j]) & (samples < boundaries[j + 1])
        quantizedSamples[mask] = quantLevels[j]
    # Compute the MSE
    mse_t = np.mean((samples - quantizedSamples)**2)
    if iter_n == numIters - 1:
        print(f"MSE: {mse_t} | Quantization Levels: {quantLevels}")
```
This code works :D
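Finally, once the boundaries and levels have converged, quantizing fresh samples doesn't need the per-level masking loop. As a small optional sketch (it reuses `boundaries`, `quantLevels`, `mu`, and `sigma` from the script above; the `newSamples` and `regionIdx` names are just for illustration), `np.digitize` maps every sample to its region in one vectorized call:

```python
# Quantize a fresh batch of samples with the trained quantizer (illustrative names).
# With the +/-inf end boundaries dropped, np.digitize returns the index of the
# region each sample falls in, i.e. values in 0 .. numQuantLevels-1.
newSamples = np.random.normal(mu, sigma, 1000)
regionIdx = np.digitize(newSamples, boundaries[1:-1])
quantizedNew = quantLevels[regionIdx]
print(f"MSE on fresh samples: {np.mean((newSamples - quantizedNew)**2)}")
```

Dropping the infinite end boundaries before calling `np.digitize` makes the returned indices line up directly with `quantLevels`.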