diff --git a/setup/05_accelerator_processors/01_habana_processing_unit/README.md b/setup/05_accelerator_processors/01_habana_processing_unit/README.md new file mode 100644 index 000000000..6842dfa64 --- /dev/null +++ b/setup/05_accelerator_processors/01_habana_processing_unit/README.md @@ -0,0 +1,51 @@ +# Running Code on Habana Gaudi (HPU) + +This directory contains instructions for running the inference part of [Chapter 6](../../../ch06/01_main-chapter-code/ch06.ipynb) on Habana Gaudi processors. The code demonstrates how to leverage HPU acceleration. + +## Prerequisites + +1. **Habana Driver and Libraries** + Make sure you have the correct driver and libraries installed for Gaudi processors. You can follow the official installation guide from Habana Labs: + [Habana Labs Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) + +2. **SynapseAI SDK** + The SynapseAI SDK includes the compiler, runtime, and various libraries needed to compile and run models on Gaudi hardware. + +### Note +If you're working in an environment with Gaudi HPU instances, it most likely already has a PyTorch build preinstalled (e.g., version 2.4.0a0+git74cd574) that is optimized for Habana Gaudi processors, so it is important that you do not install another version of PyTorch. For this reason, this folder contains a separate `requirements.txt` file that does not include PyTorch. + + +## Getting Started +1. **Model Configuration** + The code supports various GPT-2 model sizes: + - GPT-2 Small (124M parameters) + - GPT-2 Medium (355M parameters) + - GPT-2 Large (774M parameters) + - GPT-2 XL (1558M parameters) + +2. **Running the Code** + + *Note: We assume that you have already downloaded the model weights and placed them in the `gpt2` directory inside this folder. Additionally, we use the `review_classifier.pth` weights created in [Chapter 6](../../../ch06/01_main-chapter-code/ch06.ipynb), so there is nothing else to download; just copy the `review_classifier.pth` file into this folder.* + - Open the `inference_on_gaudi.ipynb` notebook + - Follow the cells to: + - Initialize the HPU device + - Load and configure the model + - Run inference on the Gaudi processor + +3. **Performance Monitoring** + The notebook includes a performance comparison that measures inference time on the CPU vs. the HPU. + +## Code Structure + +- `inference_on_gaudi.ipynb`: Main notebook for running inference on Gaudi +- `previous_chapters.py`: Supporting code from earlier chapters (Chapters 2-5) used by the notebook + +## Troubleshooting + +- **Driver Issues**: Make sure the driver version matches the SDK version. +- **Performance**: For optimal performance, monitor logs and use Habana's profiling tools to identify bottlenecks. + +## Additional Resources + +- [Habana Developer Site](https://developer.habana.ai/) +- [SynapseAI Reference](https://docs.habana.ai/en/latest/) diff --git a/setup/05_accelerator_processors/01_habana_processing_unit/gpt_download.py b/setup/05_accelerator_processors/01_habana_processing_unit/gpt_download.py new file mode 100644 index 000000000..2291bc872 --- /dev/null +++ b/setup/05_accelerator_processors/01_habana_processing_unit/gpt_download.py @@ -0,0 +1,157 @@ +# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt). 
+# Source for "Build a Large Language Model From Scratch" +# - https://www.manning.com/books/build-a-large-language-model-from-scratch +# Code: https://github.com/rasbt/LLMs-from-scratch + + +import os +import urllib.request + +# import requests +import json +import numpy as np +import tensorflow as tf +from tqdm import tqdm + + +def download_and_load_gpt2(model_size, models_dir): + # Validate model size + allowed_sizes = ("124M", "355M", "774M", "1558M") + if model_size not in allowed_sizes: + raise ValueError(f"Model size not in {allowed_sizes}") + + # Define paths + model_dir = os.path.join(models_dir, model_size) + base_url = "https://openaipublic.blob.core.windows.net/gpt-2/models" + backup_base_url = "https://f001.backblazeb2.com/file/LLMs-from-scratch/gpt2" + filenames = [ + "checkpoint", "encoder.json", "hparams.json", + "model.ckpt.data-00000-of-00001", "model.ckpt.index", + "model.ckpt.meta", "vocab.bpe" + ] + + # Download files + os.makedirs(model_dir, exist_ok=True) + for filename in filenames: + file_url = os.path.join(base_url, model_size, filename) + backup_url = os.path.join(backup_base_url, model_size, filename) + file_path = os.path.join(model_dir, filename) + download_file(file_url, file_path, backup_url) + + # Load settings and params + tf_ckpt_path = tf.train.latest_checkpoint(model_dir) + settings = json.load(open(os.path.join(model_dir, "hparams.json"))) + params = load_gpt2_params_from_tf_ckpt(tf_ckpt_path, settings) + + return settings, params + + +def download_file(url, destination, backup_url=None): + def _attempt_download(download_url): + with urllib.request.urlopen(download_url) as response: + # Get the total file size from headers, defaulting to 0 if not present + file_size = int(response.headers.get("Content-Length", 0)) + + # Check if file exists and has the same size + if os.path.exists(destination): + file_size_local = os.path.getsize(destination) + if file_size == file_size_local: + print(f"File already exists and is up-to-date: {destination}") + return True # Indicate success without re-downloading + + block_size = 1024 # 1 Kilobyte + + # Initialize the progress bar with total file size + progress_bar_description = os.path.basename(download_url) + with tqdm(total=file_size, unit="iB", unit_scale=True, desc=progress_bar_description) as progress_bar: + with open(destination, "wb") as file: + while True: + chunk = response.read(block_size) + if not chunk: + break + file.write(chunk) + progress_bar.update(len(chunk)) + return True + + try: + if _attempt_download(url): + return + except (urllib.error.HTTPError, urllib.error.URLError): + if backup_url is not None: + print(f"Primary URL ({url}) failed. Attempting backup URL: {backup_url}") + try: + if _attempt_download(backup_url): + return + except urllib.error.HTTPError: + pass + + # If we reach here, both attempts have failed + error_message = ( + f"Failed to download from both primary URL ({url})" + f"{' and backup URL (' + backup_url + ')' if backup_url else ''}." 
+ "\nCheck your internet connection or the file availability.\n" + "For help, visit: https://github.com/rasbt/LLMs-from-scratch/discussions/273" + ) + print(error_message) + except Exception as e: + print(f"An unexpected error occurred: {e}") + + +# Alternative way using `requests` +""" +def download_file(url, destination): + # Send a GET request to download the file in streaming mode + response = requests.get(url, stream=True) + + # Get the total file size from headers, defaulting to 0 if not present + file_size = int(response.headers.get("content-length", 0)) + + # Check if file exists and has the same size + if os.path.exists(destination): + file_size_local = os.path.getsize(destination) + if file_size == file_size_local: + print(f"File already exists and is up-to-date: {destination}") + return + + # Define the block size for reading the file + block_size = 1024 # 1 Kilobyte + + # Initialize the progress bar with total file size + progress_bar_description = url.split("/")[-1] # Extract filename from URL + with tqdm(total=file_size, unit="iB", unit_scale=True, desc=progress_bar_description) as progress_bar: + # Open the destination file in binary write mode + with open(destination, "wb") as file: + # Iterate over the file data in chunks + for chunk in response.iter_content(block_size): + progress_bar.update(len(chunk)) # Update progress bar + file.write(chunk) # Write the chunk to the file +""" + + +def load_gpt2_params_from_tf_ckpt(ckpt_path, settings): + # Initialize parameters dictionary with empty blocks for each layer + params = {"blocks": [{} for _ in range(settings["n_layer"])]} + + # Iterate over each variable in the checkpoint + for name, _ in tf.train.list_variables(ckpt_path): + # Load the variable and remove singleton dimensions + variable_array = np.squeeze(tf.train.load_variable(ckpt_path, name)) + + # Process the variable name to extract relevant parts + variable_name_parts = name.split("/")[1:] # Skip the 'model/' prefix + + # Identify the target dictionary for the variable + target_dict = params + if variable_name_parts[0].startswith("h"): + layer_number = int(variable_name_parts[0][1:]) + target_dict = params["blocks"][layer_number] + + # Recursively access or create nested dictionaries + for key in variable_name_parts[1:-1]: + target_dict = target_dict.setdefault(key, {}) + + # Assign the variable array to the last key + last_key = variable_name_parts[-1] + target_dict[last_key] = variable_array + + return params diff --git a/setup/05_accelerator_processors/01_habana_processing_unit/inference_on_gaudi.ipynb b/setup/05_accelerator_processors/01_habana_processing_unit/inference_on_gaudi.ipynb new file mode 100644 index 000000000..ac8425298 --- /dev/null +++ b/setup/05_accelerator_processors/01_habana_processing_unit/inference_on_gaudi.ipynb @@ -0,0 +1,667 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Running Inference on Habana Gaudi (HPU)\n", + "\n", + "This notebook demonstrates how to run inference using the Habana Gaudi processor (HPU) with our GPT model for classification task from Chapter 6." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Construct model (same as in Chapter 6)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# import necessary libraries\n", + "import habana_frameworks.torch as ht # import Habana PyTorch framework first\n", + "import torch\n", + "import tiktoken" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize tokenizer\n", + "tokenizer = tiktoken.get_encoding(\"gpt2\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# model configuration and parameters\n", + "CHOOSE_MODEL = \"gpt2-small (124M)\"\n", + "INPUT_PROMPT = \"Every effort moves\"\n", + "\n", + "BASE_CONFIG = {\n", + " \"vocab_size\": 50257, # Vocabulary size\n", + " \"context_length\": 1024, # Context length\n", + " \"drop_rate\": 0.0, # Dropout rate\n", + " \"qkv_bias\": True # Query-key-value bias\n", + "}\n", + "\n", + "model_configs = {\n", + " \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n", + " \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n", + " \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n", + " \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n", + "}\n", + "\n", + "BASE_CONFIG.update(model_configs[CHOOSE_MODEL])" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# use functions from previous chapters to download and load the model\n", + "from previous_chapters import GPTModel\n", + "\n", + "model_size = CHOOSE_MODEL.split(\" \")[-1].lstrip(\"(\").rstrip(\")\")\n", + "\n", + "model = GPTModel(BASE_CONFIG)\n", + "\n", + "# add a new output head to the model (same as in Chapter 6)\n", + "model.out_head = torch.nn.Linear(in_features=BASE_CONFIG[\"emb_dim\"], out_features=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Load model weights\n", + "We use the weights saved when we trained the model in Chapter 6 on spam classification task." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model_state_dict = torch.load(\"review_classifier.pth\", map_location=torch.device(\"cpu\"), weights_only=True) # load weights to CPU to avoid memory issues\n", + "model.load_state_dict(model_state_dict) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Move model to HPU\n", + "You have to have access to HPU!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "============================= HABANA PT BRIDGE CONFIGURATION =========================== \n", + " PT_HPU_LAZY_MODE = 1\n", + " PT_RECIPE_CACHE_PATH = \n", + " PT_CACHE_FOLDER_DELETE = 0\n", + " PT_HPU_RECIPE_CACHE_CONFIG = \n", + " PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807\n", + " PT_HPU_LAZY_ACC_PAR_MODE = 1\n", + " PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0\n", + " PT_HPU_EAGER_PIPELINE_ENABLE = 1\n", + " PT_HPU_EAGER_COLLECTIVE_PIPELINE_ENABLE = 1\n", + "---------------------------: System Configuration :---------------------------\n", + "Num CPU Cores : 152\n", + "CPU RAM : 1056439544 KB\n", + "------------------------------------------------------------------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "GPTModel(\n", + " (tok_emb): Embedding(50257, 768)\n", + " (pos_emb): Embedding(1024, 768)\n", + " (drop_emb): Dropout(p=0.0, inplace=False)\n", + " (trf_blocks): Sequential(\n", + " (0): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (1): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (2): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (3): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, 
out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (4): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (5): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (6): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (7): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " 
(norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (8): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (9): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (10): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (11): TransformerBlock(\n", + " (att): MultiHeadAttention(\n", + " (W_query): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_key): Linear(in_features=768, out_features=768, bias=True)\n", + " (W_value): Linear(in_features=768, out_features=768, bias=True)\n", + " (out_proj): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.0, inplace=False)\n", + " )\n", + " (ff): FeedForward(\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=768, out_features=3072, bias=True)\n", + " (1): GELU()\n", + " (2): Linear(in_features=3072, out_features=768, bias=True)\n", + " )\n", + " )\n", + " (norm1): LayerNorm()\n", + " (norm2): LayerNorm()\n", + " (drop_resid): Dropout(p=0.0, inplace=False)\n", + " )\n", + " )\n", + " (final_norm): LayerNorm()\n", + " (out_head): Linear(in_features=768, out_features=2, bias=True)\n", + ")" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "device = torch.device(\"hpu\")\n", + "model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Classify reviews function\n", + "Same as in Chapter 6" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def classify_review(text, model, tokenizer, device, max_length=None, pad_token_id=50256):\n", + " model.eval()\n", + "\n", + " # Prepare inputs to the model\n", + " input_ids = tokenizer.encode(text)\n", + " supported_context_length = model.pos_emb.weight.shape[0]\n", + " # Note: In the book, this was originally written as pos_emb.weight.shape[1] by mistake\n", + " # It didn't break the code but would have caused unnecessary truncation (to 768 instead of 1024)\n", + "\n", + " # Truncate sequences if they too long\n", + " input_ids = input_ids[:min(max_length, supported_context_length)]\n", + "\n", + " # Pad sequences to the longest sequence\n", + " input_ids += [pad_token_id] * (max_length - len(input_ids))\n", + " input_tensor = torch.tensor(input_ids, device=device).unsqueeze(0) # add batch dimension\n", + "\n", + " # Model inference\n", + " with torch.no_grad():\n", + " logits = model(input_tensor)[:, -1, :] # Logits of the last output token\n", + " predicted_label = torch.argmax(logits, dim=-1).item()\n", + "\n", + " # Return the classified result\n", + " return \"spam\" if predicted_label == 1 else \"not spam\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Test the model" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "spam\n" + ] + } + ], + "source": [ + "text_1 = (\n", + " \"You are a winner you have been specially\"\n", + " \" selected to receive $1000 cash or a $2000 award.\"\n", + ")\n", + "\n", + "print(classify_review(\n", + " text_1, model, tokenizer, device, max_length=120\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "not spam\n" + ] + } + ], + "source": [ + "text_2 = (\n", + " \"Hey, just wanted to check if we're still on\"\n", + " \" for dinner tonight? Let me know!\"\n", + ")\n", + "\n", + "print(classify_review(\n", + " text_2, model, tokenizer, device, max_length=120\n", + "))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It works!\n", + "Now let's compare the performance of the model on CPU and HPU." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. Performance comparison\n", + "We will use the `time` library to measure the time it takes to classify a review." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU time: 5.098230602893423 seconds per message\n", + "HPU time: 0.08832299709320068 seconds per message\n", + "HPU faster by 57.72x\n" + ] + } + ], + "source": [ + "import time\n", + "\n", + "# Let's create a couple of spam/not spam messages\n", + "messages = [\n", + " \"You are a winner you have been specially selected to receive $1000 cash or a $2000 award.\", # Spam\n", + " \"Please send me your bank details so I can transfer you the money.\", # Spam\n", + " \"I'm going to the gym now, want to join?\", # Not spam\n", + " \"This is not spam, it's a test message.\", # Not spam\n", + " \"Congratulations! You've won a free iPhone! Click here to claim.\", # Spam\n", + " \"Hey, are we still on for lunch tomorrow?\", # Not spam\n", + " \"URGENT! 
Your account has been compromised. Reset your password now!\", # Spam\n", + " \"Meeting rescheduled to 3 PM. Let me know if that works.\", # Not spam\n", + " \"Limited time offer! Buy now and get 50% off!\", # Spam\n", + " \"Can you review this document and send me your feedback?\", # Not spam\n", + " \"FREE MONEY! Click this link to receive your reward.\", # Spam\n", + " \"Your order has been shipped. Track it here.\", # Not spam\n", + " \"Earn $$$ from home! No experience needed. Sign up today.\", # Spam\n", + " \"Reminder: Your dentist appointment is on Monday at 10 AM.\", # Not spam\n", + " \"Exclusive deal just for you! Unlock your discount now!\", # Spam\n", + " \"Happy Birthday! Hope you have an amazing day!\", # Not spam\n", + " \"Claim your prize before it's too late! Act now!\", # Spam\n", + " \"Thanks for your help with the project. Appreciate it!\", # Not spam\n", + " \"Your PayP@l account needs verification! Click here immediately!\", # Spam\n", + " \"Hey, can you send me that report by EOD?\", # Not spam\n", + " \"Final warning! Your subscription will be canceled unless you act now!\", # Spam\n", + " \"Dinner plans tonight? Let me know.\", # Not spam\n", + " \"You have been selected for a special offer! Open now!\", # Spam\n", + " \"Let's catch up over coffee next week.\", # Not spam\n", + " \"Instant weight loss! See the miracle solution here.\", # Spam\n", + " \"Just checking in—how are you doing?\", # Not spam\n", + " \"Hurry! Stocks are running out. Order yours today!\", # Spam\n", + " \"Can you confirm the schedule for tomorrow?\", # Not spam\n", + " \"Dear user, your acc0unt has suspicious activity. Verify now!\", # Spam\n", + " \"Great job on the presentation today!\", # Not spam\n", + " \"Double your profits in just 7 days! Guaranteed!\", # Spam\n", + " \"I'll be late for the meeting, stuck in traffic.\", # Not spam\n", + " \"Secret investment opportunity—make millions fast!\", # Spam\n", + " \"Let's finalize the contract details this afternoon.\", # Not spam\n", + " \"Congratulations, you are the chosen winner of our lottery!\", # Spam\n", + " \"Thanks for your help with the budget analysis.\", # Not spam\n", + " \"Y0ur p@ckage is d3layed. Cl!ck here to f!x.\", # Spam\n", + " \"I’ll send over the revised slides shortly.\", # Not spam\n", + " \"Work from home and make $$$ instantly!\", # Spam\n", + " \"Can you join the call at 2 PM instead of 3?\", # Not spam\n", + " \"Limited seats available! Enroll in our exclusive program today.\", # Spam\n", + " \"See you at the event later!\", # Not spam\n", + " \"Hurry! This deal won’t last long. Act fast!\", # Spam\n", + " \"Your invoice for last month is attached.\", # Not spam\n", + " \"Boost your credit score instantly! Click here.\", # Spam\n", + " \"Looking forward to our meeting tomorrow.\", # Not spam\n", + " \"Your social media account has been hacked! 
Reset password now!\", # Spam\n", + " \"Let’s schedule a team lunch next week.\", # Not spam\n", + " \"Easy way to make extra cash online—start today!\", # Spam\n", + " \"Can you review the proposal before we submit?\", # Not spam\n", + " \"F!nal rem!nder: Update y0ur b@nk details NOW!\", # Spam\n", + " \"Thanks for the update on the project.\", # Not spam\n", + " \"Your subscription has been successfully renewed.\", # Not spam\n", + " \"Meet singles in your area now!\", # Spam\n", + " \"I left my laptop at the office, can you bring it?\", # Not spam\n", + " \"You are pre-approved for a low-interest loan!\", # Spam\n", + " \"Looking forward to your presentation next week!\", # Not spam\n", + " \"WIN a brand-new car! Just sign up!\", # Spam\n", + " \"Hope you’re feeling better today!\", # Not spam\n", + " \"Act fast! This offer expires soon!\", # Spam\n", + " \"Thanks for the great conversation earlier.\", # Not spam\n", + " \"Your p@ssw0rd will expire soon! Cl!ck here to reset.\", # Spam\n", + " \"Don’t forget our dinner plans tonight!\", # Not spam\n", + " \"Limited-time deal! Get yours now before it’s gone!\", # Spam\n", + " \"Have a safe flight!\", # Not spam\n", + " \"FREE investment tips! Join our webinar today!\", # Spam\n", + " \"Let me know if you need any help with the project.\", # Not spam\n", + " \"This is not a scam! You have won $1,000,000!\", # Spam\n", + " \"Excited to see you at the conference!\", # Not spam\n", + " \"You won’t believe this shocking weight loss secret!\", # Spam\n", + " \"Are you available for a quick call?\", # Not spam\n", + " \"Act n0w! Your acc0unt has been compromised!\", # Spam\n", + " \"Let’s meet at the usual coffee shop.\", # Not spam\n", + " \"Earn p@ssive inc0me with this one simple trick!\", # Spam\n", + " \"Thanks for helping me with the move.\", # Not spam\n", + " \"Your cl@im has been approved! Cl!ck here to get it.\", # Spam\n", + " \"Don’t miss out on this exclusive deal!\", # Spam\n", + " \"Let’s touch base later today.\", # Not spam\n", + " \"This is your last chance to claim your reward!\", # Spam\n", + " \"Great catching up with you yesterday!\", # Not spam\n", + " \"Cl@im y0ur refund now! L!mited time offer!\", # Spam\n", + " \"Important update regarding your bank account.\", # Spam\n", + " \"See you at the meeting in 10 minutes.\", # Not spam\n", + " \"Get rich quick with this foolproof method!\", # Spam\n", + " \"I’ll send you the details by email.\", # Not spam\n", + " \"Unbelievable investment opportunity—act now!\", # Spam\n", + " \"Don’t forget about the deadline tomorrow.\", # Not spam\n", + " \"Y0ur Netflix account is locked! Verify now!\", # Spam\n", + " \"Grab your free sample today!\", # Spam\n", + " \"I’ll share the report with you later.\", # Not spam\n", + " \"This stock is about to skyrocket! 
Invest today!\", # Spam\n", + " \"Reminder: Submit your expense report by Friday.\", # Not spam\n", + " \"Claim your Bitcoin bonus now!\", # Spam\n", + " \"XXX WEBSITE XXX\", # Spam\n", + " ]\n", + "\n", + "num_messages = len(messages)\n", + "\n", + "# Test on CPU\n", + "start_time = time.time()\n", + "cpu_results = [classify_review(msg, model.to('cpu'), tokenizer, device, max_length=120) for msg in messages]\n", + "end_time = time.time()\n", + "cpu_time = end_time - start_time\n", + "print(f\"CPU time: {cpu_time / num_messages} seconds per message\")\n", + "\n", + "# Test on HPU\n", + "start_time = time.time()\n", + "hpu_results = [classify_review(msg, model.to('hpu'), tokenizer, device, max_length=120) for msg in messages]\n", + "end_time = time.time()\n", + "hpu_time = end_time - start_time\n", + "print(f\"HPU time: {hpu_time / num_messages} seconds per message\")\n", + "\n", + "# Compare results\n", + "print(f\"HPU faster by {cpu_time / hpu_time:.2f}x\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/setup/05_accelerator_processors/01_habana_processing_unit/previous_chapters.py b/setup/05_accelerator_processors/01_habana_processing_unit/previous_chapters.py new file mode 100644 index 000000000..9f3d8e838 --- /dev/null +++ b/setup/05_accelerator_processors/01_habana_processing_unit/previous_chapters.py @@ -0,0 +1,320 @@ +# Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt). +# Source for "Build a Large Language Model From Scratch" +# - https://www.manning.com/books/build-a-large-language-model-from-scratch +# Code: https://github.com/rasbt/LLMs-from-scratch +# +# This file collects all the relevant code that we covered thus far +# throughout Chapters 2-5. +# This file can be run as a standalone script. 
+ +import numpy as np +import tiktoken +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader + +##################################### +# Chapter 2 +##################################### + + +class GPTDatasetV1(Dataset): + def __init__(self, txt, tokenizer, max_length, stride): + self.input_ids = [] + self.target_ids = [] + + # Tokenize the entire text + token_ids = tokenizer.encode(txt, allowed_special={"<|endoftext|>"}) + + # Use a sliding window to chunk the book into overlapping sequences of max_length + for i in range(0, len(token_ids) - max_length, stride): + input_chunk = token_ids[i:i + max_length] + target_chunk = token_ids[i + 1: i + max_length + 1] + self.input_ids.append(torch.tensor(input_chunk)) + self.target_ids.append(torch.tensor(target_chunk)) + + def __len__(self): + return len(self.input_ids) + + def __getitem__(self, idx): + return self.input_ids[idx], self.target_ids[idx] + + +def create_dataloader_v1(txt, batch_size=4, max_length=256, + stride=128, shuffle=True, drop_last=True, num_workers=0): + # Initialize the tokenizer + tokenizer = tiktoken.get_encoding("gpt2") + + # Create dataset + dataset = GPTDatasetV1(txt, tokenizer, max_length, stride) + + # Create dataloader + dataloader = DataLoader( + dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, num_workers=num_workers) + + return dataloader + + +##################################### +# Chapter 3 +##################################### +class MultiHeadAttention(nn.Module): + def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False): + super().__init__() + assert d_out % num_heads == 0, "d_out must be divisible by n_heads" + + self.d_out = d_out + self.num_heads = num_heads + self.head_dim = d_out // num_heads # Reduce the projection dim to match desired output dim + + self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias) + self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias) + self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias) + self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs + self.dropout = nn.Dropout(dropout) + self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1)) + + def forward(self, x): + b, num_tokens, d_in = x.shape + + keys = self.W_key(x) # Shape: (b, num_tokens, d_out) + queries = self.W_query(x) + values = self.W_value(x) + + # We implicitly split the matrix by adding a `num_heads` dimension + # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim) + keys = keys.view(b, num_tokens, self.num_heads, self.head_dim) + values = values.view(b, num_tokens, self.num_heads, self.head_dim) + queries = queries.view(b, num_tokens, self.num_heads, self.head_dim) + + # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim) + keys = keys.transpose(1, 2) + queries = queries.transpose(1, 2) + values = values.transpose(1, 2) + + # Compute scaled dot-product attention (aka self-attention) with a causal mask + attn_scores = queries @ keys.transpose(2, 3) # Dot product for each head + + # Original mask truncated to the number of tokens and converted to boolean + mask_bool = self.mask.bool()[:num_tokens, :num_tokens] + + # Use the mask to fill attention scores + attn_scores.masked_fill_(mask_bool, -torch.inf) + + attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1) + attn_weights = self.dropout(attn_weights) + + # Shape: (b, num_tokens, num_heads, head_dim) + context_vec = (attn_weights @ 
values).transpose(1, 2) + + # Combine heads, where self.d_out = self.num_heads * self.head_dim + context_vec = context_vec.reshape(b, num_tokens, self.d_out) + context_vec = self.out_proj(context_vec) # optional projection + + return context_vec + + +##################################### +# Chapter 4 +##################################### +class LayerNorm(nn.Module): + def __init__(self, emb_dim): + super().__init__() + self.eps = 1e-5 + self.scale = nn.Parameter(torch.ones(emb_dim)) + self.shift = nn.Parameter(torch.zeros(emb_dim)) + + def forward(self, x): + mean = x.mean(dim=-1, keepdim=True) + var = x.var(dim=-1, keepdim=True, unbiased=False) + norm_x = (x - mean) / torch.sqrt(var + self.eps) + return self.scale * norm_x + self.shift + + +class GELU(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return 0.5 * x * (1 + torch.tanh( + torch.sqrt(torch.tensor(2.0 / torch.pi)) * + (x + 0.044715 * torch.pow(x, 3)) + )) + + +class FeedForward(nn.Module): + def __init__(self, cfg): + super().__init__() + self.layers = nn.Sequential( + nn.Linear(cfg["emb_dim"], 4 * cfg["emb_dim"]), + GELU(), + nn.Linear(4 * cfg["emb_dim"], cfg["emb_dim"]), + ) + + def forward(self, x): + return self.layers(x) + + +class TransformerBlock(nn.Module): + def __init__(self, cfg): + super().__init__() + self.att = MultiHeadAttention( + d_in=cfg["emb_dim"], + d_out=cfg["emb_dim"], + context_length=cfg["context_length"], + num_heads=cfg["n_heads"], + dropout=cfg["drop_rate"], + qkv_bias=cfg["qkv_bias"]) + self.ff = FeedForward(cfg) + self.norm1 = LayerNorm(cfg["emb_dim"]) + self.norm2 = LayerNorm(cfg["emb_dim"]) + self.drop_resid = nn.Dropout(cfg["drop_rate"]) + + def forward(self, x): + # Shortcut connection for attention block + shortcut = x + x = self.norm1(x) + x = self.att(x) # Shape [batch_size, num_tokens, emb_size] + x = self.drop_resid(x) + x = x + shortcut # Add the original input back + + # Shortcut connection for feed-forward block + shortcut = x + x = self.norm2(x) + x = self.ff(x) + x = self.drop_resid(x) + x = x + shortcut # Add the original input back + + return x + + +class GPTModel(nn.Module): + def __init__(self, cfg): + super().__init__() + self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"]) + self.pos_emb = nn.Embedding(cfg["context_length"], cfg["emb_dim"]) + self.drop_emb = nn.Dropout(cfg["drop_rate"]) + + self.trf_blocks = nn.Sequential( + *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])]) + + self.final_norm = LayerNorm(cfg["emb_dim"]) + self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False) + + def forward(self, in_idx): + batch_size, seq_len = in_idx.shape + tok_embeds = self.tok_emb(in_idx) + pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device)) + x = tok_embeds + pos_embeds # Shape [batch_size, num_tokens, emb_size] + x = self.drop_emb(x) + x = self.trf_blocks(x) + x = self.final_norm(x) + logits = self.out_head(x) + return logits + + +def generate_text_simple(model, idx, max_new_tokens, context_size): + # idx is (B, T) array of indices in the current context + for _ in range(max_new_tokens): + + # Crop current context if it exceeds the supported context size + # E.g., if LLM supports only 5 tokens, and the context size is 10 + # then only the last 5 tokens are used as context + idx_cond = idx[:, -context_size:] + + # Get the predictions + with torch.no_grad(): + logits = model(idx_cond) + + # Focus only on the last time step + # (batch, n_token, vocab_size) becomes (batch, vocab_size) + logits = 
logits[:, -1, :] + + # Get the idx of the vocab entry with the highest logits value + idx_next = torch.argmax(logits, dim=-1, keepdim=True) # (batch, 1) + + # Append sampled index to the running sequence + idx = torch.cat((idx, idx_next), dim=1) # (batch, n_tokens+1) + + return idx + + +##################################### +# Chapter 5 +##################################### +def assign(left, right): + if left.shape != right.shape: + raise ValueError(f"Shape mismatch. Left: {left.shape}, Right: {right.shape}") + return torch.nn.Parameter(torch.tensor(right)) + + +def load_weights_into_gpt(gpt, params): + gpt.pos_emb.weight = assign(gpt.pos_emb.weight, params['wpe']) + gpt.tok_emb.weight = assign(gpt.tok_emb.weight, params['wte']) + + for b in range(len(params["blocks"])): + q_w, k_w, v_w = np.split( + (params["blocks"][b]["attn"]["c_attn"])["w"], 3, axis=-1) + gpt.trf_blocks[b].att.W_query.weight = assign( + gpt.trf_blocks[b].att.W_query.weight, q_w.T) + gpt.trf_blocks[b].att.W_key.weight = assign( + gpt.trf_blocks[b].att.W_key.weight, k_w.T) + gpt.trf_blocks[b].att.W_value.weight = assign( + gpt.trf_blocks[b].att.W_value.weight, v_w.T) + + q_b, k_b, v_b = np.split( + (params["blocks"][b]["attn"]["c_attn"])["b"], 3, axis=-1) + gpt.trf_blocks[b].att.W_query.bias = assign( + gpt.trf_blocks[b].att.W_query.bias, q_b) + gpt.trf_blocks[b].att.W_key.bias = assign( + gpt.trf_blocks[b].att.W_key.bias, k_b) + gpt.trf_blocks[b].att.W_value.bias = assign( + gpt.trf_blocks[b].att.W_value.bias, v_b) + + gpt.trf_blocks[b].att.out_proj.weight = assign( + gpt.trf_blocks[b].att.out_proj.weight, + params["blocks"][b]["attn"]["c_proj"]["w"].T) + gpt.trf_blocks[b].att.out_proj.bias = assign( + gpt.trf_blocks[b].att.out_proj.bias, + params["blocks"][b]["attn"]["c_proj"]["b"]) + + gpt.trf_blocks[b].ff.layers[0].weight = assign( + gpt.trf_blocks[b].ff.layers[0].weight, + params["blocks"][b]["mlp"]["c_fc"]["w"].T) + gpt.trf_blocks[b].ff.layers[0].bias = assign( + gpt.trf_blocks[b].ff.layers[0].bias, + params["blocks"][b]["mlp"]["c_fc"]["b"]) + gpt.trf_blocks[b].ff.layers[2].weight = assign( + gpt.trf_blocks[b].ff.layers[2].weight, + params["blocks"][b]["mlp"]["c_proj"]["w"].T) + gpt.trf_blocks[b].ff.layers[2].bias = assign( + gpt.trf_blocks[b].ff.layers[2].bias, + params["blocks"][b]["mlp"]["c_proj"]["b"]) + + gpt.trf_blocks[b].norm1.scale = assign( + gpt.trf_blocks[b].norm1.scale, + params["blocks"][b]["ln_1"]["g"]) + gpt.trf_blocks[b].norm1.shift = assign( + gpt.trf_blocks[b].norm1.shift, + params["blocks"][b]["ln_1"]["b"]) + gpt.trf_blocks[b].norm2.scale = assign( + gpt.trf_blocks[b].norm2.scale, + params["blocks"][b]["ln_2"]["g"]) + gpt.trf_blocks[b].norm2.shift = assign( + gpt.trf_blocks[b].norm2.shift, + params["blocks"][b]["ln_2"]["b"]) + + gpt.final_norm.scale = assign(gpt.final_norm.scale, params["g"]) + gpt.final_norm.shift = assign(gpt.final_norm.shift, params["b"]) + gpt.out_head.weight = assign(gpt.out_head.weight, params["wte"]) + + +def text_to_token_ids(text, tokenizer): + encoded = tokenizer.encode(text, allowed_special={'<|endoftext|>'}) + encoded_tensor = torch.tensor(encoded).unsqueeze(0) # add batch dimension + return encoded_tensor + + +def token_ids_to_text(token_ids, tokenizer): + flat = token_ids.squeeze(0) # remove batch dimension + return tokenizer.decode(flat.tolist()) diff --git a/setup/05_accelerator_processors/01_habana_processing_unit/requirements.txt b/setup/05_accelerator_processors/01_habana_processing_unit/requirements.txt new file mode 100644 index 000000000..bf781f163 --- 
/dev/null +++ b/setup/05_accelerator_processors/01_habana_processing_unit/requirements.txt @@ -0,0 +1,8 @@ +jupyterlab >= 4.0 # all +tiktoken >= 0.5.1 # ch02; ch04; ch05 +matplotlib >= 3.7.1 # ch04; ch06; ch07 +tqdm >= 4.66.1 # ch05; ch07 +tensorflow >= 2.18.0 +numpy<2.0 # dependency of several other libraries like torch and pandas +pandas >= 2.2.1 # ch06 +psutil >= 5.9.5 # ch07; already installed automatically as dependency of torch diff --git a/setup/05_accelerator_processors/README.md b/setup/05_accelerator_processors/README.md new file mode 100644 index 000000000..560b82152 --- /dev/null +++ b/setup/05_accelerator_processors/README.md @@ -0,0 +1,19 @@ +# Accelerator Processors + +This directory contains instructions and code examples for running the book's code on different hardware accelerators. + +## Current Implementations + +### HPU (Habana Gaudi) +Instructions and code examples for running the book's code on Habana Gaudi processors can be found in the [HPU](./01_habana_processing_unit) directory. + +## Additional Accelerators + +This section can be expanded with implementations for other accelerator processors, including but not limited to: + +- GPU (Graphics Processing Units) +- NPU (Neural Processing Units) +- TPU (Tensor Processing Units) +- Other specialized AI accelerators + +Additional accelerator implementations will help make this book's code more accessible across different hardware platforms. \ No newline at end of file
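
As a rough illustration of how accelerator-specific examples tie into the book's code, the sketch below shows a generic device-selection pattern in the spirit of the HPU notebook above. It is a minimal sketch, not part of the book's official code: the helper name `select_device` is made up for illustration, and it assumes that `habana_frameworks.torch` is only importable on Gaudi (HPU) machines.

```python
# Minimal sketch (illustrative only): pick the best available accelerator device.
# Assumption: `habana_frameworks.torch` can only be imported on Gaudi (HPU) machines.
import torch


def select_device() -> torch.device:
    try:
        import habana_frameworks.torch  # noqa: F401  # loading the bridge registers the "hpu" backend
        return torch.device("hpu")
    except ImportError:
        pass
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


device = select_device()
print(f"Using device: {device}")
# A GPTModel instance from the chapter code could then be moved with model.to(device).
```

In the HPU notebook itself, the device is simply set with `device = torch.device("hpu")` after importing `habana_frameworks.torch`, which loads the PyTorch-Habana bridge and makes the `hpu` device available.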