Cleanup
This commit is contained in:
@@ -6,8 +6,6 @@ import json
|
|||||||
import polars as pl
|
import polars as pl
|
||||||
import dataclasses
|
import dataclasses
|
||||||
|
|
||||||
import workloads as wl
|
|
||||||
|
|
||||||
from config import Statistics, Configuration
|
from config import Statistics, Configuration
|
||||||
|
|
||||||
device = torch.device("cuda:0")
|
device = torch.device("cuda:0")
|
||||||
|
|||||||
@@ -33,4 +33,4 @@ for workload in workloads:
|
|||||||
print(result)
|
print(result)
|
||||||
|
|
||||||
df = pl.DataFrame(results)
|
df = pl.DataFrame(results)
|
||||||
df.write_csv("rocm_results.csv")
|
df.write_csv("vega_results.csv")
|
||||||
|
|||||||
20
workloads.py
20
workloads.py
@@ -1,20 +0,0 @@
|
|||||||
import torch
|
|
||||||
|
|
||||||
def gemv(matrix, input_vector):
    """GEMV workload: dense matrix-vector product.

    Args:
        matrix: 2-D tensor of shape (m, n).
        input_vector: 1-D tensor of shape (n,).

    Returns:
        1-D tensor of shape (m,), the product ``matrix @ input_vector``.
    """
    product = torch.matmul(matrix, input_vector)
    return product
def gemv_layers(matrix, input_vector):
    """Layered GEMV workload: five chained matvecs with ReLU in between.

    Mimics a small MLP forward pass that reuses one weight matrix.

    Args:
        matrix: 2-D square tensor of shape (n, n) (square so the output of
            one layer can feed the next).
        input_vector: 1-D tensor of shape (n,).

    Returns:
        1-D tensor of shape (n,): the vector after five matmul+ReLU layers.
    """
    for _ in range(5):
        input_vector = torch.matmul(matrix, input_vector)
        # Bug fix: Tensor.relu() is out-of-place and the original discarded
        # its result, so the activation never reached the next layer.
        input_vector = input_vector.relu()
    return input_vector
def vadd(vector_a, vector_b):
    """Vector-add workload: elementwise sum of two same-shaped tensors."""
    return vector_a + vector_b
def vmul(vector_a, vector_b):
    """Vector-multiply workload: elementwise (Hadamard) product."""
    return vector_a * vector_b
def haxpy(vector_a, vector_b):
    """AXPY-style workload with a fixed scale: returns ``vector_a + 2 * vector_b``.

    Equivalent to ``torch.add(vector_a, vector_b, alpha=2)``.
    """
    scaled = 2 * vector_b
    return vector_a + scaled
Reference in New Issue
Block a user