author    Julian T <julian@jtle.dk>  2020-05-16 16:52:12 +0200
committer Julian T <julian@jtle.dk>  2020-05-16 16:52:12 +0200
commit    284afc630b3d0dd6c0079c6d3e83a73d6d1193e0 (patch)
tree      3197f3d38587e53b1e4413813bc41e863ef81413
parent    56f60d3409c035e12b1d7e21c14ff4f8ab43ecf9 (diff)
Added hpp assignments
-rw-r--r--  sem4/hpp/m10/opg1.py     | 17
-rw-r--r--  sem4/hpp/m10/opg2.py     | 95
-rw-r--r--  sem4/hpp/m10/template.py | 59
-rw-r--r--  sem4/hpp/m9/opgave3.py   | 31
-rw-r--r--  sem4/hpp/m9/opgave5.py   | 59
-rw-r--r--  sem4/hpp/m9/opgave6.py   | 59
-rw-r--r--  sem4/hpp/m9/opgaver.md   |  7
7 files changed, 327 insertions(+), 0 deletions(-)
diff --git a/sem4/hpp/m10/opg1.py b/sem4/hpp/m10/opg1.py
new file mode 100644
index 0000000..b8de12a
--- /dev/null
+++ b/sem4/hpp/m10/opg1.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+# Measure the performance of your Python matrix multiplication
+import numpy as np
+import time
+
+size = 1000
+
+a = np.random.random((size, size))
+b = np.random.random((size, size))
+
+start = time.time()
+result = a @ b
+end = time.time()
+
+
+print(result)
+print(f"Took { end - start } seconds")
diff --git a/sem4/hpp/m10/opg2.py b/sem4/hpp/m10/opg2.py
new file mode 100644
index 0000000..6602d4d
--- /dev/null
+++ b/sem4/hpp/m10/opg2.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# Make a "naive" implementation of a matrix multiplication.
+# - Start from the template from last lecture - all global memory, each work item calculates one value of the result independent of the others, and so on.
+# - Feel free to make assumptions on the size, for instance that the matrices are square, small enough to fit in GPU memory and so on.
+# - Make sure to test your kernel thoroughly enough that you trust it is correct.
+
+# I'm assuming that the buffers fit in memory
+import numpy as np
+import pyopencl as cl
+import time
+
+# Source of the kernel itself.
+kernel_source = """
+__kernel void matrixmult(
+ const uint shared_dim,
+ __global const float *a_device,
+ __global const float *b_device,
+ __global float *result_device)
+{
+    // offset of the i'th row of matrix a (i = get_global_id(0))
+    int index_a = get_global_id(0) * shared_dim;
+
+    // start of the j'th column of b (j = get_global_id(1)).
+    // Consecutive elements of a column are one row apart, i.e.
+    // bcols (= get_global_size(1)) elements apart in the flat array.
+    int index_b = get_global_id(1);
+    int b_jump = get_global_size(1);
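+    // e.g. with bcols = 4, column j = 1 lives at flat indices 1, 5, 9, ...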
+
+    // Compute the dot product of the row and the column
+ float result = 0;
+ for (int i = 0; i < shared_dim; i++) {
+ result += a_device[index_a + i] * b_device[index_b];
+
+        // Advance index_b by one row (bcols elements) so the next
+        // iteration reads the next element of the column.
+        index_b += b_jump;
+ }
+
+ // Save the result
+ result_device[get_global_id(0) * get_global_size(1) + get_global_id(1)] = result;
+}
+"""
+
+# rows of matrix a
+arows = 500
+# columns of matrix b
+bcols = 1000
+# columns of a / rows of b (the shared dimension)
+shared = 1000
+
+# Create the context (containing platform and device information) and command queue.
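+# (create_some_context() may prompt you to pick a device; setting the
+# PYOPENCL_CTX environment variable selects one non-interactively.)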
+context = cl.create_some_context()
+cmd_queue = cl.CommandQueue(context)
+
+# Create the host side data and an empty array to hold the result.
+a_host = np.random.random((arows, shared)).astype(np.float32)
+b_host = np.random.random((shared, bcols)).astype(np.float32)
+result_host = np.empty((arows, bcols), dtype=np.float32)
+
+# If you want to keep the kernel in a separate file, uncomment this line and adjust the filename
+#kernel_source = open("kernel.cl").read()
+
+# Create a new program from the kernel and build the source.
+prog = cl.Program(context, kernel_source).build()
+
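+# Note that this timing includes buffer allocation and host/device copies,
+# not just the kernel itself.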
+start = time.time()
+# Create a device side read-only memory buffer and copy the data from "hostbuf" into it.
+# Create as many buffers as your kernel needs.
+# You can find the other possible mem_flags values at
+# https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clCreateBuffer.html
+mf = cl.mem_flags
+a_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
+b_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
+result_device = cl.Buffer(context, mf.WRITE_ONLY, result_host.nbytes)
+
+# Execute the "sum" kernel in the program. Parameters are:
+#
+# Command queue Work group size Kernel param 1
+# ↓ Global grid size ↓ Kernel param 0 ↓ Kernel param 2
+# ↓ ↓ ↓ ↓ ↓ ↓
+prog.matrixmult(cmd_queue, result_host.shape, None, np.uint32(shared), a_device, b_device, result_device)
+
+# Copy the result back from device to host.
+cl.enqueue_copy(cmd_queue, result_host, result_device)
+
+end = time.time()
+
+print(f"GPU time {end - start} sec")
+
+
+start = time.time()
+hostcalc = a_host @ b_host
+end = time.time()
+print(f"CPU time {end - start} sec")
+
+# Check the results in the host array with Numpy.
+print("All elements close?", np.allclose(result_host, hostcalc))
diff --git a/sem4/hpp/m10/template.py b/sem4/hpp/m10/template.py
new file mode 100644
index 0000000..0824ef0
--- /dev/null
+++ b/sem4/hpp/m10/template.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# A short template to test small kernels.
+#
+
+import numpy as np
+import pyopencl as cl
+
+VEC_SIZE = 50000
+
+# Create the context (containing platform and device information) and command queue.
+context = cl.create_some_context()
+cmd_queue = cl.CommandQueue(context)
+
+# Create the host side data and an empty array to hold the result.
+a_host = np.random.rand(VEC_SIZE).astype(np.float32)
+b_host = np.random.rand(VEC_SIZE).astype(np.float32)
+result_host = np.empty_like(a_host)
+
+# Create a device side read-only memory buffer and copy the data from "hostbuf" into it.
+# Create as many buffers as your kernel needs.
+# You can find the other possible mem_flags values at
+# https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clCreateBuffer.html
+mf = cl.mem_flags
+a_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
+b_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
+result_device = cl.Buffer(context, mf.WRITE_ONLY, a_host.nbytes)
+
+# Source of the kernel itself.
+kernel_source = """
+__kernel void sum(
+ __global const float *a_device,
+ __global const float *b_device,
+ __global float *result_device)
+{
+ int gid = get_global_id(0);
+ result_device[gid] = a_device[gid] * b_device[gid];
+}
+"""
+
+# If you want to keep the kernel in a separate file, uncomment this line and adjust the filename
+#kernel_source = open("kernel.cl").read()
+
+# Create a new program from the kernel and build the source.
+prog = cl.Program(context, kernel_source).build()
+
+# Execute the "sum" kernel in the program. Parameters are:
+#
+# Command queue Work group size Kernel param 1
+# ↓ Global grid size ↓ Kernel param 0 ↓ Kernel param 2
+# ↓ ↓ ↓ ↓ ↓ ↓
+prog.sum(cmd_queue, a_host.shape, None, a_device, b_device, result_device)
+
+# Copy the result back from device to host.
+cl.enqueue_copy(cmd_queue, result_host, result_device)
+
+# Check the results in the host array with Numpy.
+print("All elements close?", np.allclose(result_host, (a_host * b_host)))
diff --git a/sem4/hpp/m9/opgave3.py b/sem4/hpp/m9/opgave3.py
new file mode 100644
index 0000000..15505eb
--- /dev/null
+++ b/sem4/hpp/m9/opgave3.py
@@ -0,0 +1,31 @@
+import numpy as np
+
+def matrixmult(a, b):
+ res = np.empty((a.shape[0], b.shape[1]))
+ for ic, c in enumerate(b.T):
+ for ir, r in enumerate(a):
+            res[ir, ic] = np.dot(c, r)
+
+ return res
+
+a = np.random.random((100, 300))
+b = np.random.random((300, 100))
+
+print("a")
+print(a)
+print("b")
+print(b)
+
+custom = matrixmult(a, b)
+
+ref = a @ b
+
+print("custom")
+print(custom)
+print("ref")
+print(ref)
+
+# Compare with allclose rather than exact equality: the loop version and
+# BLAS may sum in a different order, so the floats need not match bit for bit.
+if np.allclose(custom, ref):
+    print("Yay they are the same, well done")
+else:
+    print("Not the same, bummer")
diff --git a/sem4/hpp/m9/opgave5.py b/sem4/hpp/m9/opgave5.py
new file mode 100644
index 0000000..0824ef0
--- /dev/null
+++ b/sem4/hpp/m9/opgave5.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# A short template to test small kernels.
+#
+
+import numpy as np
+import pyopencl as cl
+
+VEC_SIZE = 50000
+
+# Create the context (containing platform and device information) and command queue.
+context = cl.create_some_context()
+cmd_queue = cl.CommandQueue(context)
+
+# Create the host side data and an empty array to hold the result.
+a_host = np.random.rand(VEC_SIZE).astype(np.float32)
+b_host = np.random.rand(VEC_SIZE).astype(np.float32)
+result_host = np.empty_like(a_host)
+
+# Create a device side read-only memory buffer and copy the data from "hostbuf" into it.
+# Create as many buffers as your kernel needs.
+# You can find the other possible mem_flags values at
+# https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clCreateBuffer.html
+mf = cl.mem_flags
+a_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
+b_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
+result_device = cl.Buffer(context, mf.WRITE_ONLY, a_host.nbytes)
+
+# Source of the kernel itself.
+kernel_source = """
+__kernel void sum(
+ __global const float *a_device,
+ __global const float *b_device,
+ __global float *result_device)
+{
+ int gid = get_global_id(0);
+ result_device[gid] = a_device[gid] * b_device[gid];
+}
+"""
+
+# If you want to keep the kernel in a separate file, uncomment this line and adjust the filename
+#kernel_source = open("kernel.cl").read()
+
+# Create a new program from the kernel and build the source.
+prog = cl.Program(context, kernel_source).build()
+
+# Execute the "sum" kernel in the program. Parameters are:
+#
+# Command queue Work group size Kernel param 1
+# ↓ Global grid size ↓ Kernel param 0 ↓ Kernel param 2
+# ↓ ↓ ↓ ↓ ↓ ↓
+prog.sum(cmd_queue, a_host.shape, None, a_device, b_device, result_device)
+
+# Copy the result back from device to host.
+cl.enqueue_copy(cmd_queue, result_host, result_device)
+
+# Check the results in the host array with Numpy.
+print("All elements close?", np.allclose(result_host, (a_host * b_host)))
diff --git a/sem4/hpp/m9/opgave6.py b/sem4/hpp/m9/opgave6.py
new file mode 100644
index 0000000..3921551
--- /dev/null
+++ b/sem4/hpp/m9/opgave6.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# A short template to test small kernels.
+#
+
+import numpy as np
+import pyopencl as cl
+
+VEC_SIZE = 50000
+
+# Create the context (containing platform and device information) and command queue.
+context = cl.create_some_context()
+cmd_queue = cl.CommandQueue(context)
+
+# Create the host side data and an empty array to hold the result.
+a_host = np.random.rand(VEC_SIZE).astype(np.float32)
+b_host = np.random.rand(VEC_SIZE).astype(np.float32)
+result_host = np.empty_like(a_host)
+
+# Create a device side read-only memory buffer and copy the data from "hostbuf" into it.
+# Create as many buffers as your kernel needs.
+# You can find the other possible mem_flags values at
+# https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clCreateBuffer.html
+mf = cl.mem_flags
+a_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
+b_device = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
+result_device = cl.Buffer(context, mf.WRITE_ONLY, a_host.nbytes)
+
+# Source of the kernel itself.
+kernel_source = """
+__kernel void sum(
+ __global const float *a_device,
+ __global const float *b_device,
+ __global float *result_device)
+{
+ int gid = get_global_id(0);
+ result_device[gid] = a_device[gid] * b_device[gid];
+}
+"""
+
+# If you want to keep the kernel in a separate file, uncomment this line and adjust the filename
+#kernel_source = open("kernel.cl").read()
+
+# Create a new program from the kernel and build the source.
+prog = cl.Program(context, kernel_source).build()
+
+# Execute the "sum" kernel in the program. Parameters are:
+#
+# Command queue Work group size Kernel param 1
+# ↓ Global grid size ↓ Kernel param 0 ↓ Kernel param 2
+# ↓ ↓ ↓ ↓ ↓ ↓
+prog.sum(cmd_queue, a_host.shape, None, a_device, b_device, result_device)
+
+# Copy the result back from device to host.
+cl.enqueue_copy(cmd_queue, result_host, result_device)
+
+# The kernel produced elementwise products; their sum is the dot product
+# of a and b, which we compare against np.dot.
+print("All elements close?", np.allclose(np.sum(result_host), np.dot(a_host, b_host)))
diff --git a/sem4/hpp/m9/opgaver.md b/sem4/hpp/m9/opgaver.md
new file mode 100644
index 0000000..15ccf1e
--- /dev/null
+++ b/sem4/hpp/m9/opgaver.md
@@ -0,0 +1,7 @@
+## Exercise 1, 2
+
+> How many operations are involved in the multiplication?
+> Assume that all three matrices are of the data type float (IEEE754, aka Binary32, 4 bytes floating point). How much storage is needed to perform the operation?
+
+
+Solved this one in my notebook.
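+
+A rough back-of-the-envelope for reference, assuming square n x n matrices
+(as in opg1.py, n = 1000):
+
+- Operations: n^2 dot products of length n, i.e. n^3 multiplications and
+  about as many additions, so roughly 2n^3 = 2e9 FLOPs.
+- Storage: two inputs plus the result, 3 * n^2 floats * 4 bytes = 12 MB.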