UML — PCA

4 min read
# PCA Dimensionality Reduction
## Question 1: Iris
# Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# --- Question 1: visualize the Iris data before and after PCA ---
# (Formatting fix: the loop bodies below had lost their indentation in the
# original paste, which made the script a SyntaxError.)

# Load the Iris dataset: 150 samples, 4 features, 3 species.
iris = load_iris()
X = iris.data    # feature matrix
y = iris.target  # species labels 0/1/2

# Standardize to mean 0 / variance 1 — PCA is sensitive to feature scale.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Project the standardized data onto the first two principal components.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

# DataFrames for plotting: first two raw features vs. the PCA projection.
original_df = pd.DataFrame(data=X[:, :2], columns=['Sepal Length', 'Sepal Width'])
original_df['Species'] = y
pca_df = pd.DataFrame(data=X_pca, columns=['PC1', 'PC2'])
pca_df['Species'] = y

plt.figure(figsize=(12, 5))

# Left panel: the data in its original (first two) feature axes.
plt.subplot(1, 2, 1)
colors = ['red', 'green', 'blue']
species = iris.target_names
for i, color in enumerate(colors):
    mask = original_df['Species'] == i
    plt.scatter(original_df.loc[mask, 'Sepal Length'],
                original_df.loc[mask, 'Sepal Width'],
                c=color, label=species[i])
plt.title('Original Data (Sepal Length vs. Sepal Width)')
plt.xlabel('Sepal Length')
plt.ylabel('Sepal Width')
plt.legend()
plt.grid()

# Right panel: the same points in principal-component space.
plt.subplot(1, 2, 2)
for i, color in enumerate(colors):
    mask = pca_df['Species'] == i
    plt.scatter(pca_df.loc[mask, 'PC1'],
                pca_df.loc[mask, 'PC2'],
                c=color, label=species[i])
plt.title('PCA Transformed Data (PC1 vs. PC2)')
plt.xlabel('Principal Component 1 (PC1)')
plt.ylabel('Principal Component 2 (PC2)')
plt.legend()
plt.grid()

plt.tight_layout()
plt.show()

# Fraction of total variance captured by each principal component.
explained_variance = pca.explained_variance_ratio_
print("Explained Variance Ratio:", explained_variance)
# Question 2: Customer Segmentation
# Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# --- Question 2: PCA + K-Means customer segmentation on synthetic data ---

# Fix the RNG so the synthetic dataset (and hence clusters) are reproducible.
np.random.seed(42)

num_customers = 200
customer_data = pd.DataFrame({
    'Annual Income ($1000s)': np.random.randint(20, 120, num_customers),
    'Spending Score (1-100)': np.random.randint(1, 101, num_customers),
    'Frequency of Purchase': np.random.randint(1, 20, num_customers),
    'Average Cart Value ($)': np.random.randint(50, 500, num_customers)
})

# Standardise so each feature contributes equally to the components.
scaled_data = StandardScaler().fit_transform(customer_data)

# Reduce the four features to two principal components.
pca = PCA(n_components=2)
pca_df = pd.DataFrame(pca.fit_transform(scaled_data), columns=['PC1', 'PC2'])

# Scree plot. Derive the x-axis from the fitted PCA rather than hard-coding
# range(1, 3), so the plot stays correct if n_components is changed above.
explained = pca.explained_variance_ratio_
plt.figure(figsize=(8, 5))
plt.plot(range(1, len(explained) + 1), explained, marker='o', linestyle='--', color='b')
plt.xlabel("Principal Components")
plt.ylabel("Explained Variance Ratio")
plt.title('Scree Plot for PCA')
plt.grid(True)
plt.show()

# Elbow method: K-Means inertia for K = 1..10 on the 2-D projection.
inertia = [KMeans(n_clusters=k, random_state=42, n_init=10).fit(pca_df).inertia_
           for k in range(1, 11)]
plt.figure(figsize=(8, 5))
plt.plot(range(1, 11), inertia, marker='o', linestyle='--', color='r')
plt.xlabel('Number of Clusters (K)')
plt.ylabel('Inertia')
plt.title('Elbow Method for Optimal K')
plt.grid(True)
plt.show()

# Cluster in PCA space; K chosen by eye from the elbow plot above.
optimal_k = 4
kmeans = KMeans(n_clusters=optimal_k, random_state=42, n_init=10)
pca_df['Cluster'] = kmeans.fit_predict(pca_df)

# Clusters plotted in principal-component space.
plt.figure(figsize=(10, 6))
sns.scatterplot(data=pca_df, x='PC1', y='PC2', hue='Cluster', palette='viridis', s=100, edgecolor='black')
plt.title('PCA-Based Customer Segmentation')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(title='Cluster')
plt.grid(True)
plt.show()

# Same cluster labels shown against two of the original features, to compare
# the PCA-space segmentation with the raw feature space.
plt.figure(figsize=(10, 5))
sns.scatterplot(data=customer_data, x='Annual Income ($1000s)', y='Spending Score (1-100)',
                hue=pca_df['Cluster'], palette='viridis', s=100, edgecolor='black')
plt.title('Original Customer Segmentation (Before PCA)')
plt.xlabel('Annual Income ($1000s)')
plt.ylabel('Spending Score (1-100)')
plt.legend(title='Cluster')
plt.grid(True)
plt.show()

# Attach the cluster labels to the original table and show a sample.
customer_data['Cluster'] = pca_df['Cluster']
print(customer_data.head())
# Case Study: Anomaly Detection in Network Traffic Using PCA
# Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# --- Case study: PCA + K-Means on synthetic network traffic ---

# Fix the RNG so the synthetic traffic and injected anomalies are reproducible.
np.random.seed(42)

num_packets = 1000
network_data = pd.DataFrame({
    'Packet Size (bytes)': np.random.randint(40, 1500, num_packets),
    'Duration (ms)': np.random.uniform(0.1, 10.0, num_packets),
    'Num Packets in Session': np.random.randint(1, 50, num_packets),
    'Protocol Type': np.random.choice([1, 2, 3], num_packets),  # 1=TCP, 2=UDP, 3=ICMP
    'Source-Destination Similarity': np.random.uniform(0, 1, num_packets)
})

# Inject 30 known anomalies: inflate packet size and per-session packet count.
anomaly_indices = np.random.choice(num_packets, size=30, replace=False)
network_data.loc[anomaly_indices, 'Packet Size (bytes)'] *= 10
network_data.loc[anomaly_indices, 'Num Packets in Session'] *= 5

# Step 2: standardise so each feature contributes equally to the components.
scaled_data = StandardScaler().fit_transform(network_data)

# Step 3: project onto two principal components.
pca = PCA(n_components=2)
pca_df = pd.DataFrame(pca.fit_transform(scaled_data), columns=['PC1', 'PC2'])

# Step 4: scree plot. Derive the x-axis from the fitted PCA rather than
# hard-coding range(1, 3), so it stays correct if n_components changes.
explained = pca.explained_variance_ratio_
plt.figure(figsize=(8, 5))
plt.plot(range(1, len(explained) + 1), explained, marker='o', linestyle='--', color='b')
plt.xlabel("Principal Components")
plt.ylabel("Explained Variance Ratio")
plt.title('Scree Plot for PCA')
plt.grid(True)
plt.show()

# Elbow method: K-Means inertia for K = 1..10 on the 2-D projection.
inertia = [KMeans(n_clusters=k, random_state=42, n_init=10).fit(pca_df).inertia_
           for k in range(1, 11)]
plt.figure(figsize=(8, 5))
plt.plot(range(1, 11), inertia, marker='o', linestyle='--', color='r')
plt.xlabel('Number of Clusters (K)')
plt.ylabel('Inertia')
plt.title('Elbow Method for Optimal K')
plt.grid(True)
plt.show()

# Cluster in PCA space; K chosen by eye from the elbow plot above.
optimal_k = 4
kmeans = KMeans(n_clusters=optimal_k, random_state=42, n_init=10)
pca_df['Cluster'] = kmeans.fit_predict(pca_df)

# Visualise the clusters in principal-component space.
plt.figure(figsize=(10, 6))
sns.scatterplot(data=pca_df, x='PC1', y='PC2', hue='Cluster', palette='coolwarm', s=100, edgecolor='black')
plt.title('PCA-Based Network Traffic Clustering')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(title='Cluster')
plt.grid(True)
plt.show()

# Step 8: compare normal and anomalous traffic.
# NOTE(review): this prints the *injected* (ground-truth) anomalies, not
# anomalies inferred from the clustering. A real detector would instead flag
# e.g. the smallest cluster or points far from their cluster centroid.
network_data['Cluster'] = pca_df['Cluster']
anomalous_traffic = network_data.loc[network_data.index.isin(anomaly_indices)]
print("Identified Anomalous Traffic samples:")
print(anomalous_traffic)

# Display a sample of the final dataset with cluster labels attached.
print(network_data.head())
0
Subscribe to my newsletter
Read articles from Invoker directly inside your inbox. Subscribe to the newsletter, and don't miss out.
Written by
