Base ViT model, v0.1 (ground zero)
To create dummy data for the Vision Transformer (ViT) model, we generate random images and random binary labels with NumPy and save them to an HDF5 file, which the training pipeline below loads as its input data.
import numpy as np
import matplotlib.pyplot as plt
import h5py

# Function to generate random images
def generate_dummy_images(num_images, image_size):
    """
    Generate a set of random images.

    Parameters:
        num_images (int): Number of images to generate.
        image_size (tuple): Size of each image (height, width, channels).

    Returns:
        np.ndarray: Array of random images.
    """
    return np.random.rand(num_images, *image_size)

# Function to generate random labels
def generate_dummy_labels(num_labels):
    """
    Generate a set of random labels (0 or 1).

    Parameters:
        num_labels (int): Number of labels to generate.

    Returns:
        np.ndarray: Array of random labels.
    """
    return np.random.randint(0, 2, num_labels)

# Generate 10 random images of size 224x224 with 3 color channels (RGB)
dummy_images = generate_dummy_images(10, (224, 224, 3))

# Generate 10 random labels
dummy_labels = generate_dummy_labels(10)

# Save the images and labels to a file called train_data.h5
with h5py.File('train_data.h5', 'w') as f:
    f.create_dataset('images', data=dummy_images)
    f.create_dataset('labels', data=dummy_labels)

# Display the first dummy image
plt.imshow(dummy_images[0])
plt.title('Dummy Image')
plt.axis('off')
plt.show()

!pip install timm
# Vision Transformer (ViT) Model Implementation for Liver Cancer Thesis
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
import seaborn as sns
from timm.models.vision_transformer import vit_base_patch16_224
import h5py
import numpy as np
from torch.utils.data import Dataset
# Custom Dataset for HDF5 files
class HDF5Dataset(Dataset):
    def __init__(self, file_path, transform=None):
        self.file_path = file_path
        self.transform = transform
        # Load the whole file into memory; fine for the small dummy dataset
        with h5py.File(self.file_path, 'r') as f:
            self.images = np.array(f['images'])
            self.labels = np.array(f['labels'])

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        label = self.labels[idx]
        # The dummy images are floats in [0, 1]; ToPILImage expects uint8 HxWxC
        if image.dtype != np.uint8:
            image = (image * 255).astype(np.uint8)
        if self.transform:
            image = self.transform(image)
        return image, label
# Model Definition
class ViTBinaryClassifier(nn.Module):
    def __init__(self):
        super(ViTBinaryClassifier, self).__init__()
        self.vit = vit_base_patch16_224(pretrained=True)
        # Replace the classification head with a 2-class linear layer.
        # No Softmax here: CrossEntropyLoss below expects raw logits.
        self.vit.head = nn.Linear(self.vit.head.in_features, 2)

    def forward(self, x):
        return self.vit(x)
# Image Preprocessing Transform
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(15),
    transforms.ColorJitter(brightness=0.1, contrast=0.1),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5]*3, std=[0.5]*3)
])
# Load Dataset and Create Dataloader
train_dataset = HDF5Dataset('train_data.h5', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
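
# Quick sanity check (an illustrative snippet, not in the original notebook):
# pull one batch and confirm shapes; with the 10 dummy images this yields a single batch.
# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)  # expected: torch.Size([10, 3, 224, 224]) torch.Size([10])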
# Training Function
def train(model, loader, optimizer, criterion, device):
    model.train()
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
# Model Instantiation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ViTBinaryClassifier().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=0.01)
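
# A minimal driver loop for the train() function defined above; num_epochs is an
# illustrative value, not taken from the original notebook.
num_epochs = 2
for epoch in range(num_epochs):
    train(model, train_loader, optimizer, criterion, device)
    print(f'Epoch {epoch + 1}/{num_epochs} complete')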
# TorchScript Export
scripted_model = torch.jit.script(model)
torch.jit.save(scripted_model, 'vit_model.pt')
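
# Optional: reload the scripted model to confirm the saved artifact loads
# (illustrative check, not part of the original notebook).
# reloaded = torch.jit.load('vit_model.pt')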
# ONNX Export
dummy_input = torch.randn(1, 3, 224, 224).to(device)
torch.onnx.export(model, dummy_input, 'vit_model.onnx',
                  input_names=['input'], output_names=['output'],
                  dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
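
# Optional: sanity-check the exported ONNX model with onnxruntime, an assumed
# extra dependency; this snippet is illustrative, not part of the original notebook.
# import onnxruntime as ort
# sess = ort.InferenceSession('vit_model.onnx')
# onnx_out = sess.run(None, {'input': dummy_input.cpu().numpy()})
# print(onnx_out[0].shape)  # expected: (1, 2)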
# Attention Visualization (captures weights via a forward hook on the first
# block's attention dropout; assumes timm's non-fused attention path)
def visualize_cls_attention(model, image_tensor):
    model.eval()
    attn_maps = []
    block = model.vit.blocks[0]
    if hasattr(block.attn, 'fused_attn'):
        block.attn.fused_attn = False
    hook = block.attn.attn_drop.register_forward_hook(
        lambda mod, inp, out: attn_maps.append(out.detach()))
    with torch.no_grad():
        model(image_tensor.unsqueeze(0).to(device))
    hook.remove()
    # (batch, heads, tokens, tokens) -> mean over heads, CLS row, drop the CLS column
    cls_attention = attn_maps[0][0].mean(dim=0)[0, 1:].reshape(14, 14).cpu()
    sns.heatmap(cls_attention, cmap='viridis')
    plt.title('CLS Attention Map')
    plt.show()

# ----------------------------
# RAG + RLHF Conceptual Integration
# ----------------------------
# RAG: Retrieval-Augmented Embedding Step
# Using ViT [CLS] embeddings with FAISS for context retrieval
import numpy as np
# Check if faiss is installed, if not, install it
try:
    import faiss
except ImportError:
    !pip install faiss-cpu
    import faiss
def extract_cls_embedding(model, image_tensor):
    model.eval()
    with torch.no_grad():
        # forward_features returns (batch, tokens, dim); token 0 is the [CLS] token
        features = model.vit.forward_features(image_tensor.unsqueeze(0))
    return features[0, 0].cpu().numpy()  # [CLS] embedding
# Build FAISS index for similarity search
def build_faiss_index(embedding_list):
    dim = embedding_list[0].shape[0]
    index = faiss.IndexFlatL2(dim)
    # FAISS expects a contiguous float32 matrix of shape (n, dim)
    index.add(np.stack(embedding_list).astype(np.float32))
    return index
# RLHF: Simulate human feedback and reward tuning
def compute_rlhf_reward(model_output, expert_label, model_confidence):
    # +1 when the predicted class matches the expert label, -1 otherwise
    agreement = 1.0 if torch.argmax(model_output) == expert_label else -1.0
    # Use the confidence supplied by the caller (max softmax probability)
    disagreement_penalty = 1.0 - model_confidence
    alpha, beta, gamma = 1.0, 0.5, 0.3  # tuned weights
    reward = alpha * agreement + beta * model_confidence - gamma * disagreement_penalty
    return reward
# Policy gradient update placeholder for RLHF loop
# (Would require PPO setup for full implementation)
def update_policy(model, reward):
    # Placeholder: In actual RLHF, use PPO or Actor-Critic here
    print(f'Reward: {reward:.3f} — used to refine policy.')
# Example usage
# image_tensor = transform(image).to(device)
# cls_embed = extract_cls_embedding(model, image_tensor)
# index = build_faiss_index([cls_embed])
# distances, indices = index.search(np.expand_dims(cls_embed, axis=0), k=5)
# model_output = model(image_tensor.unsqueeze(0))
# probs = torch.softmax(model_output, dim=1)
# reward = compute_rlhf_reward(probs, expert_label=1, model_confidence=torch.max(probs).item())
# update_policy(model, reward)