
Utilities API

This page documents the utility and visualization functions used throughout the quantum data embedding suite.

Data Validation and Preprocessing

validate_data

validate_data(X, n_qubits)

Validate and preprocess data for quantum embedding.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |
| n_qubits | int | Number of qubits available | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| X | ndarray | Validated and potentially reshaped data |

Source code in quantum_data_embedding_suite\utils.py
def validate_data(X: np.ndarray, n_qubits: int) -> np.ndarray:
    """
    Validate and preprocess data for quantum embedding.

    Parameters
    ----------
    X : array-like
        Input data
    n_qubits : int
        Number of qubits available

    Returns
    -------
    X : ndarray
        Validated and potentially reshaped data
    """
    X = np.asarray(X, dtype=np.float64)

    if X.ndim == 1:
        X = X.reshape(1, -1)
    elif X.ndim != 2:
        raise ValueError(f"Data must be 1D or 2D array, got {X.ndim}D")

    # Basic validation
    if np.any(~np.isfinite(X)):
        raise ValueError("Data contains NaN or infinite values")

    return X
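
A minimal usage sketch, assuming (as the source path above suggests) that the helper is importable from `quantum_data_embedding_suite.utils`:

```python
import numpy as np
from quantum_data_embedding_suite.utils import validate_data  # assumed import path

# A 1D sample is promoted to a (1, n_features) batch
X = validate_data([0.1, 0.5, 0.9], n_qubits=4)
print(X.shape, X.dtype)  # (1, 3) float64

# Non-finite values are rejected up front
try:
    validate_data(np.array([[np.nan, 1.0]]), n_qubits=2)
except ValueError as err:
    print(err)  # Data contains NaN or infinite values
```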

normalize_data

normalize_data(X, method='minmax', feature_range=(0, 2 * np.pi))

Normalize data for quantum embedding.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |
| method | str | Normalization method ('minmax', 'standard', 'unit') | 'minmax' |
| feature_range | tuple | Target range for minmax scaling | (0, 2*pi) |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| X_normalized | ndarray | Normalized data |

Source code in quantum_data_embedding_suite\utils.py
def normalize_data(
    X: np.ndarray, 
    method: str = "minmax", 
    feature_range: Tuple[float, float] = (0, 2*np.pi)
) -> np.ndarray:
    """
    Normalize data for quantum embedding.

    Parameters
    ----------
    X : array-like
        Input data
    method : str, default='minmax'
        Normalization method ('minmax', 'standard', 'unit')
    feature_range : tuple, default=(0, 2*pi)
        Target range for minmax scaling

    Returns
    -------
    X_normalized : ndarray
        Normalized data
    """
    X = np.asarray(X, dtype=np.float64)

    if method == "minmax":
        scaler = MinMaxScaler(feature_range=feature_range)
        return scaler.fit_transform(X)
    elif method == "standard":
        scaler = StandardScaler()
        return scaler.fit_transform(X)
    elif method == "unit":
        # Normalize each sample to unit norm
        norms = np.linalg.norm(X, axis=1, keepdims=True)
        norms = np.where(norms == 0, 1, norms)  # Avoid division by zero
        return X / norms
    else:
        raise ValueError(f"Unknown normalization method: {method}")

pad_features

pad_features(X, target_features)

Pad features with zeros to match the target dimension; inputs that already meet or exceed the target are truncated instead.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |
| target_features | int | Target number of features | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| X_padded | ndarray | Data padded to target dimension |

Source code in quantum_data_embedding_suite\utils.py
def pad_features(X: np.ndarray, target_features: int) -> np.ndarray:
    """
    Pad features to match target dimension.

    Parameters
    ----------
    X : array-like
        Input data
    target_features : int
        Target number of features

    Returns
    -------
    X_padded : ndarray
        Data padded to target dimension
    """
    X = np.asarray(X)

    if X.shape[1] >= target_features:
        # Already at or above the target width: truncate instead of padding
        return X[:, :target_features]

    # Pad with zeros
    n_samples = X.shape[0]
    n_pad = target_features - X.shape[1]
    padding = np.zeros((n_samples, n_pad))

    return np.hstack([X, padding])
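
A short sketch of both behaviors (assumed import path as above); padding to a power of two is the typical use before amplitude embedding, which expects 2**n_qubits features:

```python
import numpy as np
from quantum_data_embedding_suite.utils import pad_features  # assumed import path

# Pad 3 features up to 2**2 = 4 for a 2-qubit amplitude embedding
X = np.array([[0.2, 0.4, 0.6]])
print(pad_features(X, target_features=4))  # [[0.2 0.4 0.6 0. ]]

# Inputs wider than the target are truncated rather than padded
print(pad_features(np.ones((1, 6)), target_features=4).shape)  # (1, 4)
```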

reduce_features

reduce_features(X, target_features, method='pca')

Reduce feature dimension using classical methods.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |
| target_features | int | Target number of features | required |
| method | str | Reduction method ('pca', 'truncate', 'average') | 'pca' |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| X_reduced | ndarray | Data with reduced features |

Source code in quantum_data_embedding_suite\utils.py
def reduce_features(
    X: np.ndarray, 
    target_features: int, 
    method: str = "pca"
) -> np.ndarray:
    """
    Reduce feature dimension using classical methods.

    Parameters
    ----------
    X : array-like
        Input data
    target_features : int
        Target number of features
    method : str, default='pca'
        Reduction method ('pca', 'truncate', 'average')

    Returns
    -------
    X_reduced : ndarray
        Data with reduced features
    """
    X = np.asarray(X)

    if X.shape[1] <= target_features:
        return X

    if method == "pca":
        from sklearn.decomposition import PCA
        pca = PCA(n_components=target_features)
        return pca.fit_transform(X)
    elif method == "truncate":
        return X[:, :target_features]
    elif method == "average":
        # Average neighboring features
        n_groups = target_features
        group_size = X.shape[1] // n_groups

        X_reduced = np.zeros((X.shape[0], n_groups))
        for i in range(n_groups):
            start_idx = i * group_size
            end_idx = (i + 1) * group_size if i < n_groups - 1 else X.shape[1]
            X_reduced[:, i] = np.mean(X[:, start_idx:end_idx], axis=1)

        return X_reduced
    else:
        raise ValueError(f"Unknown reduction method: {method}")

Data Generation and Analysis

generate_random_data

generate_random_data(n_samples, n_features, data_type='gaussian', seed=None)

Generate random data for testing.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| n_samples | int | Number of samples | required |
| n_features | int | Number of features | required |
| data_type | str | Type of random data ('gaussian', 'uniform', 'sphere') | 'gaussian' |
| seed | int | Random seed for reproducibility | None |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| X | ndarray | Random data |

Source code in quantum_data_embedding_suite\utils.py
def generate_random_data(
    n_samples: int, 
    n_features: int, 
    data_type: str = "gaussian",
    seed: Optional[int] = None
) -> np.ndarray:
    """
    Generate random data for testing.

    Parameters
    ----------
    n_samples : int
        Number of samples
    n_features : int
        Number of features
    data_type : str, default='gaussian'
        Type of random data ('gaussian', 'uniform', 'sphere')
    seed : int, optional
        Random seed for reproducibility

    Returns
    -------
    X : ndarray
        Random data
    """
    if seed is not None:
        np.random.seed(seed)

    if data_type == "gaussian":
        return np.random.normal(0, 1, (n_samples, n_features))
    elif data_type == "uniform":
        return np.random.uniform(-1, 1, (n_samples, n_features))
    elif data_type == "sphere":
        # Points on unit hypersphere
        X = np.random.normal(0, 1, (n_samples, n_features))
        norms = np.linalg.norm(X, axis=1, keepdims=True)
        return X / norms
    else:
        raise ValueError(f"Unknown data type: {data_type}")

compute_pairwise_distances

compute_pairwise_distances(X)

Compute pairwise Euclidean distances.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| distances | ndarray | Pairwise distance matrix |

Source code in quantum_data_embedding_suite\utils.py
def compute_pairwise_distances(X: np.ndarray) -> np.ndarray:
    """
    Compute pairwise Euclidean distances.

    Parameters
    ----------
    X : array-like
        Input data

    Returns
    -------
    distances : ndarray
        Pairwise distance matrix
    """
    from sklearn.metrics.pairwise import euclidean_distances
    return euclidean_distances(X)

compute_data_statistics

compute_data_statistics(X)

Compute basic statistics of the data.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| stats | dict | Dictionary containing data statistics |

Source code in quantum_data_embedding_suite\utils.py
def compute_data_statistics(X: np.ndarray) -> dict:
    """
    Compute basic statistics of the data.

    Parameters
    ----------
    X : array-like
        Input data

    Returns
    -------
    stats : dict
        Dictionary containing data statistics
    """
    X = np.asarray(X)

    stats = {
        'shape': X.shape,
        'mean': np.mean(X, axis=0),
        'std': np.std(X, axis=0),
        'min': np.min(X, axis=0),
        'max': np.max(X, axis=0),
        'median': np.median(X, axis=0),
        'range': np.ptp(X, axis=0),  # peak-to-peak (max - min)
    }

    # Global statistics
    stats.update({
        'global_mean': np.mean(X),
        'global_std': np.std(X),
        'global_min': np.min(X),
        'global_max': np.max(X),
        'condition_number': np.linalg.cond(X @ X.T) if X.shape[0] <= X.shape[1] else np.linalg.cond(X.T @ X),
    })

    return stats
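
A quick sketch combining the two analysis helpers above (assumed import path):

```python
import numpy as np
from quantum_data_embedding_suite.utils import (  # assumed import path
    compute_pairwise_distances,
    compute_data_statistics,
)

X = np.array([[0.0, 0.0], [3.0, 4.0], [1.0, 7.0]])

D = compute_pairwise_distances(X)
print(D[0, 1])  # 5.0, the Euclidean distance between the first two samples

stats = compute_data_statistics(X)
print(stats["shape"], stats["global_mean"])  # (3, 2) 2.5
```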

estimate_optimal_qubits

estimate_optimal_qubits(X, embedding_type='angle')

Estimate optimal number of qubits for data.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input data | required |
| embedding_type | str | Type of embedding | 'angle' |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| n_qubits | int | Recommended number of qubits |

Source code in quantum_data_embedding_suite\utils.py
def estimate_optimal_qubits(X: np.ndarray, embedding_type: str = "angle") -> int:
    """
    Estimate optimal number of qubits for data.

    Parameters
    ----------
    X : array-like
        Input data
    embedding_type : str, default='angle'
        Type of embedding

    Returns
    -------
    n_qubits : int
        Recommended number of qubits
    """
    X = np.asarray(X)
    n_features = X.shape[1]

    if embedding_type == "angle":
        # For angle embedding, typically 1 feature per qubit
        return min(n_features, 10)  # Cap at 10 qubits for practicality
    elif embedding_type == "amplitude":
        # For amplitude embedding, need 2^n_qubits >= n_features
        return max(1, int(np.ceil(np.log2(n_features))))
    elif embedding_type in ["iqp", "data_reuploading"]:
        # These can be more flexible
        return min(max(2, n_features // 2), 8)
    else:
        # Default heuristic
        return min(max(2, n_features), 6)
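
For a 12-feature dataset, the heuristics above give (assumed import path):

```python
import numpy as np
from quantum_data_embedding_suite.utils import estimate_optimal_qubits  # assumed import path

X = np.zeros((200, 12))

print(estimate_optimal_qubits(X, embedding_type="angle"))      # 10: one feature per qubit, capped at 10
print(estimate_optimal_qubits(X, embedding_type="amplitude"))  # 4:  smallest n with 2**n >= 12
print(estimate_optimal_qubits(X, embedding_type="iqp"))        # 6:  min(max(2, 12 // 2), 8)
```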

check_quantum_advantage_potential

check_quantum_advantage_potential(X, y=None)

Assess potential for quantum advantage on given data.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Input features | required |
| y | array-like | Target labels | None |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| assessment | dict | Dictionary containing quantum advantage potential assessment |

Source code in quantum_data_embedding_suite\utils.py
def check_quantum_advantage_potential(X: np.ndarray, y: np.ndarray = None) -> dict:
    """
    Assess potential for quantum advantage on given data.

    Parameters
    ----------
    X : array-like
        Input features
    y : array-like, optional
        Target labels

    Returns
    -------
    assessment : dict
        Dictionary containing quantum advantage potential assessment
    """
    X = np.asarray(X)
    n_samples, n_features = X.shape

    assessment = {
        'data_size': 'small' if n_samples < 100 else 'medium' if n_samples < 1000 else 'large',
        'feature_dimension': 'low' if n_features < 5 else 'medium' if n_features < 20 else 'high',
        'recommended_qubits': estimate_optimal_qubits(X),
    }

    # Assess data complexity
    try:
        from sklearn.decomposition import PCA
        pca = PCA()
        pca.fit(X)

        # Intrinsic dimensionality
        explained_var_ratio = pca.explained_variance_ratio_
        intrinsic_dim = np.sum(np.cumsum(explained_var_ratio) < 0.95) + 1
        assessment['intrinsic_dimension'] = intrinsic_dim

        # Data spread
        assessment['data_spread'] = 'concentrated' if np.sum(explained_var_ratio[:2]) > 0.9 else 'distributed'

    except Exception:
        assessment['intrinsic_dimension'] = n_features
        assessment['data_spread'] = 'unknown'

    # If labels are provided, assess separability
    if y is not None:
        y = np.asarray(y)
        n_classes = len(np.unique(y))
        assessment['n_classes'] = n_classes
        assessment['classification_complexity'] = 'binary' if n_classes == 2 else 'multiclass'

        # Simple separability check
        try:
            from sklearn.linear_model import LogisticRegression
            from sklearn.model_selection import cross_val_score

            lr = LogisticRegression(max_iter=1000)
            scores = cross_val_score(lr, X, y, cv=min(5, n_samples // 10))
            linear_accuracy = np.mean(scores)

            assessment['linear_separability'] = 'high' if linear_accuracy > 0.9 else 'medium' if linear_accuracy > 0.7 else 'low'
            assessment['quantum_advantage_potential'] = 'high' if linear_accuracy < 0.8 else 'medium' if linear_accuracy < 0.9 else 'low'

        except Exception:
            assessment['linear_separability'] = 'unknown'
            assessment['quantum_advantage_potential'] = 'unknown'

    return assessment
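
A sketch on an XOR-like target, where a linear model performs poorly and the assessment should flag higher quantum-advantage potential (assumed import path; requires scikit-learn):

```python
import numpy as np
from quantum_data_embedding_suite.utils import check_quantum_advantage_potential  # assumed import path

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 8))
y = (X[:, 0] * X[:, 1] > 0).astype(int)  # nonlinearly separable labels

report = check_quantum_advantage_potential(X, y)
print(report["data_size"], report["feature_dimension"])  # medium medium
print(report["linear_separability"])                     # typically 'low' for this target
print(report["quantum_advantage_potential"])             # typically 'high'
```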

Visualization Functions

The following plotting helpers live in the quantum_data_embedding_suite.visualization module:

plot_kernel_comparison

plot_kernel_comparison(K_quantum, K_classical, output_file=None, figsize=(12, 5))

Plot comparison between quantum and classical kernel matrices.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| K_quantum | array-like | Quantum kernel matrix | required |
| K_classical | array-like | Classical kernel matrix | required |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (12, 5) |
Source code in quantum_data_embedding_suite\visualization.py
@requires_license()
def plot_kernel_comparison(
    K_quantum: np.ndarray,
    K_classical: np.ndarray,
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (12, 5)
) -> None:
    """
    Plot comparison between quantum and classical kernel matrices.

    Parameters
    ----------
    K_quantum : array-like
        Quantum kernel matrix
    K_classical : array-like
        Classical kernel matrix
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(12, 5)
        Figure size (width, height)
    """
    fig, axes = plt.subplots(1, 3, figsize=figsize)

    # Plot quantum kernel
    im1 = axes[0].imshow(K_quantum, cmap='viridis', vmin=0, vmax=1)
    axes[0].set_title('Quantum Kernel')
    axes[0].set_xlabel('Sample Index')
    axes[0].set_ylabel('Sample Index')
    plt.colorbar(im1, ax=axes[0])

    # Plot classical kernel
    im2 = axes[1].imshow(K_classical, cmap='viridis', vmin=0, vmax=1)
    axes[1].set_title('Classical Kernel')
    axes[1].set_xlabel('Sample Index')
    axes[1].set_ylabel('Sample Index')
    plt.colorbar(im2, ax=axes[1])

    # Plot difference
    diff = K_quantum - K_classical
    im3 = axes[2].imshow(diff, cmap='RdBu', vmin=-1, vmax=1)
    axes[2].set_title('Difference (Q - C)')
    axes[2].set_xlabel('Sample Index')
    axes[2].set_ylabel('Sample Index')
    plt.colorbar(im3, ax=axes[2])

    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
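
A usage sketch with a synthetic stand-in for the quantum kernel; in practice K_quantum would come from the suite's kernel machinery, and note the `@requires_license()` gate on this function:

```python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from quantum_data_embedding_suite.visualization import plot_kernel_comparison  # assumed import path

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 4))

K_classical = rbf_kernel(X)
noise = rng.normal(0, 0.05, K_classical.shape)
K_quantum = np.clip(K_classical + (noise + noise.T) / 2, 0, 1)  # symmetric placeholder

plot_kernel_comparison(K_quantum, K_classical, output_file="kernel_comparison.png")
```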

plot_expressibility_analysis

plot_expressibility_analysis(embeddings, expressibility_scores, output_file=None, figsize=(10, 6))

Plot expressibility analysis across different embeddings.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| embeddings | list of str | Embedding names | required |
| expressibility_scores | list of float | Expressibility scores for each embedding | required |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (10, 6) |
Source code in quantum_data_embedding_suite\visualization.py
def plot_expressibility_analysis(
    embeddings: List[str],
    expressibility_scores: List[float],
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (10, 6)
) -> None:
    """
    Plot expressibility analysis across different embeddings.

    Parameters
    ----------
    embeddings : list of str
        Embedding names
    expressibility_scores : list of float
        Expressibility scores for each embedding
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(10, 6)
        Figure size (width, height)
    """
    fig, ax = plt.subplots(figsize=figsize)

    colors = plt.cm.Set3(np.linspace(0, 1, len(embeddings)))
    bars = ax.bar(embeddings, expressibility_scores, color=colors)

    # Add value labels on bars
    for bar, score in zip(bars, expressibility_scores):
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                f'{score:.3f}', ha='center', va='bottom')

    ax.set_ylabel('Expressibility Score')
    ax.set_title('Expressibility Comparison Across Embeddings')
    ax.set_ylim(0, 1.1)

    # Add horizontal line at 0.5 for reference
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.7, label='Baseline')
    ax.legend()

    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
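
For example, comparing scores across a few embedding names (assumed import path; the scores here are illustrative placeholders, not measured results):

```python
from quantum_data_embedding_suite.visualization import plot_expressibility_analysis  # assumed import path

embeddings = ["angle", "amplitude", "iqp", "data_reuploading"]
scores = [0.62, 0.48, 0.81, 0.77]  # illustrative values only

plot_expressibility_analysis(embeddings, scores, output_file="expressibility.png")
```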

plot_kernel_eigenspectrum

plot_kernel_eigenspectrum(K, title='Kernel Eigenspectrum', output_file=None, figsize=(8, 6))

Plot eigenspectrum of a kernel matrix.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| K | array-like | Kernel matrix | required |
| title | str | Plot title | "Kernel Eigenspectrum" |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (8, 6) |
Source code in quantum_data_embedding_suite\visualization.py
def plot_kernel_eigenspectrum(
    K: np.ndarray,
    title: str = "Kernel Eigenspectrum",
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (8, 6)
) -> None:
    """
    Plot eigenspectrum of a kernel matrix.

    Parameters
    ----------
    K : array-like
        Kernel matrix
    title : str, default="Kernel Eigenspectrum"
        Plot title
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(8, 6)
        Figure size (width, height)
    """
    # Compute eigenvalues
    eigenvals = np.linalg.eigvals(K)
    eigenvals = np.real(eigenvals)
    eigenvals = np.sort(eigenvals)[::-1]  # Sort in descending order

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)

    # Plot eigenvalues
    ax1.plot(eigenvals, 'o-', markersize=4)
    ax1.set_xlabel('Eigenvalue Index')
    ax1.set_ylabel('Eigenvalue')
    ax1.set_title('Eigenvalues')
    ax1.grid(True, alpha=0.3)

    # Plot cumulative variance explained
    cumvar = np.cumsum(eigenvals) / np.sum(eigenvals)
    ax2.plot(cumvar, 'o-', markersize=4)
    ax2.axhline(y=0.95, color='red', linestyle='--', alpha=0.7, label='95% threshold')
    ax2.set_xlabel('Eigenvalue Index')
    ax2.set_ylabel('Cumulative Variance Explained')
    ax2.set_title('Cumulative Variance')
    ax2.grid(True, alpha=0.3)
    ax2.legend()

    plt.suptitle(title)
    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
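
Any positive semi-definite kernel matrix works as input; a minimal sketch (assumed import path; scikit-learn supplies the example kernel):

```python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from quantum_data_embedding_suite.visualization import plot_kernel_eigenspectrum  # assumed import path

X = np.random.default_rng(1).normal(size=(30, 3))
K = rbf_kernel(X)  # example PSD kernel

plot_kernel_eigenspectrum(K, title="RBF Kernel Eigenspectrum", output_file="eigenspectrum.png")
```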

plot_embedding_landscape

plot_embedding_landscape(X, y=None, embedding_type='Unknown', output_file=None, figsize=(10, 8))

Plot 2D visualization of the embedding landscape.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| X | array-like | Data points (will be reduced to 2D if necessary) | required |
| y | array-like | Labels for coloring | None |
| embedding_type | str | Type of embedding for plot title | "Unknown" |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (10, 8) |
Source code in quantum_data_embedding_suite\visualization.py
def plot_embedding_landscape(
    X: np.ndarray,
    y: Optional[np.ndarray] = None,
    embedding_type: str = "Unknown",
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (10, 8)
) -> None:
    """
    Plot 2D visualization of the embedding landscape.

    Parameters
    ----------
    X : array-like
        Data points (will be reduced to 2D if necessary)
    y : array-like, optional
        Labels for coloring
    embedding_type : str, default="Unknown"
        Type of embedding for plot title
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(10, 8)
        Figure size (width, height)
    """
    # Reduce to 2D if necessary
    if X.shape[1] > 2:
        try:
            from sklearn.decomposition import PCA
            pca = PCA(n_components=2)
            X_2d = pca.fit_transform(X)
            variance_explained = pca.explained_variance_ratio_
            subtitle = f"PCA projection (explains {np.sum(variance_explained):.1%} variance)"
        except ImportError:
            # Fallback: just take first two dimensions
            X_2d = X[:, :2]
            subtitle = "First two dimensions"
    else:
        X_2d = X
        subtitle = "Original 2D data"

    fig, ax = plt.subplots(figsize=figsize)

    if y is not None:
        scatter = ax.scatter(X_2d[:, 0], X_2d[:, 1], c=y, cmap='tab10', s=50, alpha=0.7)
        plt.colorbar(scatter, ax=ax, label='Class')
    else:
        ax.scatter(X_2d[:, 0], X_2d[:, 1], s=50, alpha=0.7)

    ax.set_xlabel('Component 1')
    ax.set_ylabel('Component 2')
    ax.set_title(f'{embedding_type} Embedding Landscape\n{subtitle}')
    ax.grid(True, alpha=0.3)

    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
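
For example, on a 4-feature dataset the function projects to 2D with PCA before plotting (assumed import path; requires scikit-learn):

```python
from sklearn.datasets import load_iris
from quantum_data_embedding_suite.visualization import plot_embedding_landscape  # assumed import path

X, y = load_iris(return_X_y=True)  # 4 features; reduced to 2D internally via PCA
plot_embedding_landscape(X, y, embedding_type="Angle", output_file="landscape.png")
```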

plot_gradient_analysis

plot_gradient_analysis(gradients, parameter_names=None, output_file=None, figsize=(12, 5))

Plot gradient analysis for trainability assessment.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| gradients | array-like | Gradient values (can be 1D or 2D) | required |
| parameter_names | list of str | Names of parameters | None |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (12, 5) |
Source code in quantum_data_embedding_suite\visualization.py
def plot_gradient_analysis(
    gradients: np.ndarray,
    parameter_names: Optional[List[str]] = None,
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (12, 5)
) -> None:
    """
    Plot gradient analysis for trainability assessment.

    Parameters
    ----------
    gradients : array-like
        Gradient values (can be 1D or 2D)
    parameter_names : list of str, optional
        Names of parameters
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(12, 5)
        Figure size (width, height)
    """
    if gradients.ndim == 1:
        gradients = gradients.reshape(-1, 1)

    fig, axes = plt.subplots(1, 2, figsize=figsize)

    # Plot gradient magnitudes
    grad_magnitudes = np.linalg.norm(gradients, axis=1)
    axes[0].hist(grad_magnitudes, bins=30, alpha=0.7, edgecolor='black')
    axes[0].set_xlabel('Gradient Magnitude')
    axes[0].set_ylabel('Frequency')
    axes[0].set_title('Gradient Magnitude Distribution')
    axes[0].grid(True, alpha=0.3)

    # Plot gradient variance per parameter
    if gradients.shape[1] > 1:
        grad_vars = np.var(gradients, axis=0)
        x_pos = range(len(grad_vars))
        axes[1].bar(x_pos, grad_vars)
        axes[1].set_xlabel('Parameter Index')
        axes[1].set_ylabel('Gradient Variance')
        axes[1].set_title('Gradient Variance per Parameter')

        if parameter_names:
            axes[1].set_xticks(x_pos)
            axes[1].set_xticklabels(parameter_names, rotation=45, ha='right')
    else:
        # Single parameter case
        axes[1].plot(gradients[:, 0], 'o-', markersize=3)
        axes[1].set_xlabel('Sample')
        axes[1].set_ylabel('Gradient Value')
        axes[1].set_title('Gradient Evolution')
        axes[1].grid(True, alpha=0.3)

    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
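
A sketch with synthetic gradients of shape (n_samples, n_parameters) (assumed import path; the parameter names are made up for illustration):

```python
import numpy as np
from quantum_data_embedding_suite.visualization import plot_gradient_analysis  # assumed import path

rng = np.random.default_rng(3)
gradients = rng.normal(0, 0.1, size=(500, 6))  # 500 gradient samples, 6 parameters

plot_gradient_analysis(
    gradients,
    parameter_names=[f"theta_{i}" for i in range(6)],
    output_file="gradients.png",
)
```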

plot_kernel_alignment_heatmap

plot_kernel_alignment_heatmap(alignments, embedding_names, output_file=None, figsize=(8, 6))

Plot kernel alignment heatmap between different embeddings.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| alignments | array-like | Kernel alignment matrix | required |
| embedding_names | list of str | Names of embeddings | required |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (8, 6) |
Source code in quantum_data_embedding_suite\visualization.py
def plot_kernel_alignment_heatmap(
    alignments: np.ndarray,
    embedding_names: List[str],
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (8, 6)
) -> None:
    """
    Plot kernel alignment heatmap between different embeddings.

    Parameters
    ----------
    alignments : array-like
        Kernel alignment matrix
    embedding_names : list of str
        Names of embeddings
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(8, 6)
        Figure size (width, height)
    """
    fig, ax = plt.subplots(figsize=figsize)

    # Create heatmap
    im = ax.imshow(alignments, cmap='viridis', vmin=0, vmax=1)

    # Add colorbar
    cbar = plt.colorbar(im, ax=ax)
    cbar.set_label('Kernel Alignment', rotation=270, labelpad=20)

    # Set ticks and labels
    ax.set_xticks(range(len(embedding_names)))
    ax.set_yticks(range(len(embedding_names)))
    ax.set_xticklabels(embedding_names, rotation=45, ha='right')
    ax.set_yticklabels(embedding_names)

    # Add text annotations
    for i in range(len(embedding_names)):
        for j in range(len(embedding_names)):
            text = ax.text(j, i, f'{alignments[i, j]:.3f}',
                          ha="center", va="center", color="white" if alignments[i, j] < 0.5 else "black")

    ax.set_title('Kernel Alignment Between Embeddings')
    plt.tight_layout()

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()

create_embedding_dashboard

create_embedding_dashboard(results, output_file=None, figsize=(16, 12))

Create a comprehensive dashboard for embedding analysis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| results | dict | Dictionary containing analysis results | required |
| output_file | str | Path to save the plot | None |
| figsize | tuple | Figure size (width, height) | (16, 12) |
Source code in quantum_data_embedding_suite\visualization.py
@requires_license(features=["pro"])
def create_embedding_dashboard(
    results: dict,
    output_file: Optional[str] = None,
    figsize: Tuple[int, int] = (16, 12)
) -> None:
    """
    Create a comprehensive dashboard for embedding analysis.

    Parameters
    ----------
    results : dict
        Dictionary containing analysis results
    output_file : str, optional
        Path to save the plot
    figsize : tuple, default=(16, 12)
        Figure size (width, height)
    """
    fig = plt.figure(figsize=figsize)

    # Create subplot grid
    gs = fig.add_gridspec(3, 3, hspace=0.3, wspace=0.3)

    # Plot 1: Kernel matrix
    if 'kernel_matrix' in results:
        ax1 = fig.add_subplot(gs[0, 0])
        im = ax1.imshow(results['kernel_matrix'], cmap='viridis')
        ax1.set_title('Quantum Kernel Matrix')
        plt.colorbar(im, ax=ax1, fraction=0.046, pad=0.04)

    # Plot 2: Eigenspectrum
    if 'eigenvalues' in results:
        ax2 = fig.add_subplot(gs[0, 1])
        eigenvals = results['eigenvalues']
        ax2.plot(eigenvals, 'o-', markersize=4)
        ax2.set_title('Kernel Eigenspectrum')
        ax2.set_xlabel('Index')
        ax2.set_ylabel('Eigenvalue')
        ax2.grid(True, alpha=0.3)

    # Plot 3: Metrics comparison
    if 'metrics' in results:
        ax3 = fig.add_subplot(gs[0, 2])
        metrics = results['metrics']
        metric_names = list(metrics.keys())
        metric_values = list(metrics.values())

        bars = ax3.bar(range(len(metric_names)), metric_values)
        ax3.set_title('Embedding Metrics')
        ax3.set_xticks(range(len(metric_names)))
        ax3.set_xticklabels(metric_names, rotation=45, ha='right')
        ax3.set_ylabel('Score')

    # Add more plots as needed...

    plt.suptitle('Quantum Embedding Analysis Dashboard', fontsize=16)

    if output_file:
        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
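
A sketch of the `results` keys the dashboard looks for, based on the checks in the source above (this function is gated by `@requires_license(features=["pro"])`; the metric values are illustrative):

```python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from quantum_data_embedding_suite.visualization import create_embedding_dashboard  # assumed import path

K = rbf_kernel(np.random.default_rng(4).normal(size=(25, 3)))
results = {
    "kernel_matrix": K,
    "eigenvalues": np.sort(np.linalg.eigvalsh(K))[::-1],       # descending eigenspectrum
    "metrics": {"expressibility": 0.7, "trainability": 0.4},   # illustrative values only
}

create_embedding_dashboard(results, output_file="dashboard.png")
```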

create_interactive_kernel_plot

create_interactive_kernel_plot(K, labels=None)

Create interactive kernel matrix plot using Plotly.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| K | array-like | Kernel matrix | required |
| labels | array-like | Sample labels | None |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| fig | plotly figure | Interactive plot figure |

Source code in quantum_data_embedding_suite\visualization.py
def create_interactive_kernel_plot(K: np.ndarray, labels: Optional[np.ndarray] = None) -> Any:
    """
    Create interactive kernel matrix plot using Plotly.

    Parameters
    ----------
    K : array-like
        Kernel matrix
    labels : array-like, optional
        Sample labels

    Returns
    -------
    fig : plotly figure
        Interactive plot figure
    """
    try:
        import plotly.graph_objects as go
        import plotly.express as px

        # Create heatmap
        fig = go.Figure(data=go.Heatmap(
            z=K,
            colorscale='Viridis',
            showscale=True,
            hovertemplate='Sample %{x} vs Sample %{y}<br>Kernel Value: %{z:.3f}<extra></extra>'
        ))

        fig.update_layout(
            title='Interactive Quantum Kernel Matrix',
            xaxis_title='Sample Index',
            yaxis_title='Sample Index',
            width=600,
            height=600
        )

        return fig

    except ImportError:
        warnings.warn("Plotly not available, returning None")
        return None
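
A usage sketch (assumed import path); the function degrades gracefully when Plotly is missing:

```python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from quantum_data_embedding_suite.visualization import create_interactive_kernel_plot  # assumed import path

K = rbf_kernel(np.random.default_rng(2).normal(size=(15, 3)))
fig = create_interactive_kernel_plot(K)
if fig is not None:  # None is returned when Plotly is not installed
    fig.show()
```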