diff --git a/.gitignore b/.gitignore
index c05a889f2..1ed3d8403 100644
--- a/.gitignore
+++ b/.gitignore
@@ -134,3 +134,4 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+*.bin
diff --git a/docs/Mojo/Array.md b/docs/Mojo/Array.md
index fe619f7d8..a27907e7e 100644
--- a/docs/Mojo/Array.md
+++ b/docs/Mojo/Array.md
@@ -27,14 +27,19 @@ Apply the Matrix Mul for two Arrays
 a simple matmul with two arrays
 
 ```mojo
-import EasyDel as ed
+from EasyDel import Array, matmul, matmul_shape
+
+
+fn run[nelts: Int, T: DType]() raises:
+    # You can change these shapes, but remember: the columns of A must match the rows of B
+    let A: Array[T] = Array[T](True, 1, 3, 1024, 512)  # passing True allocates and randomizes the Array
+    let B: Array[T] = Array[T](True, 1, 3, 512, 1024)
+    var C: Array[T] = Array[T](A, B)
+    matmul[nelts, T](C, A, B)  # you get the same result as NumPy
 
-let E1: ed.Array[T] = ed.Array[T](1,2,18)
-let E2: ed.Array[T] = ed.Array[T](1,18,64)
-let C_Shape: ed.ArrayShape = ed.matmul_shape[T](E1, E2)
-var C: ed.Array[T] = ed.Array[T](C_Shape)
-C.fill(0.0) # Fill it with zeros
-ed.matmul[ed.Array[T].nelts, T](C, E1, E2) # parallelized and accurate
+
+fn main() raises:
+    run[Array[DType.float32].nelts, DType.float32]()
 ```
 
 in this code we convert 2 numpy array into easydel array and apply matmul on them then do the same in easydel and check
@@ -59,9 +64,7 @@ fn run[T: DType]() raises:
     let E2: ed.Array[T] = ed.convert_numpy_to_easydel_array[T](A2, shape_2)
 
     let matmul_np = np.matmul(A1, A2)
-
-    let C_Shape: ed.ArrayShape = ed.matmul_shape[T](E1, E2)
-    var C: ed.Array[T] = ed.Array[T](C_Shape) # Prepare Result Array for Matmul
+    var C: ed.Array[T] = ed.Array[T](E1, E2) # Prepare Result Array for Matmul
     C.fill(0.0) # Fill it with zeros
     ed.matmul[ed.Array[T].nelts, T](C, E1, E2)
     print(matmul_np)
@@ -127,27 +130,46 @@ Takes DType as dynamic Input like `Array[DType.float32]`
 
 `fn __init__(inout self: Self, array_shape: ArrayShape):`
 
+ - Description: Init Array From ArrayShape(Alloc Zero).
+
+`fn __init__(inout self: Self, A: Self, B: Self) -> None:`
+
+ - Description: Init Array From Two other Arrays A and B For Matmul(Alloc One).
+
 `fn __init__(inout self: Self, vl: VariadicList[Int]):`
 
+ - Description: Init Array from VariadicList[Int](Alloc Zero).
+
+`fn __init__(inout self: Self, init: Bool, *dim: Int) -> None:`
+
+ - Description: Init Array from Int Args(Depends on Passed Bool).
+
 `fn __init__(inout self: Self, *dim: Int):`
 
+ - Description: Init Array from Int Args(Alloc Zero).
+
 `fn __init__(inout self: Self, value: DynamicVector[FloatLiteral], shape: ArrayShape) -> None:`
 
+ - Description: Init Array from ArrayShape and load data from DynamicVector[FloatLiteral](Alloc One).
+
 `fn __init__(inout self: Self, value: VariadicList[FloatLiteral], shape: ArrayShape) -> None:`
 
-### Set Data from buffer
+ - Description: Init Array from ArrayShape and load data from VariadicList[FloatLiteral](Alloc One).
 
-`fn set_data_from_buffer(inout self: Self, pointer: DTypePointer[T]) -> None:`
+`fn __init__(inout self: Self, pointer: DTypePointer[T], *dim: Int) -> None:`
 
-sets data from the given buffer
+ - Description: Init Array from Int Args and load data from DTypePointer[T](Alloc One).
 
-```
-fn set_data_from_buffer(
-    inout self: Self, pointer: DTypePointer[T], shape: VariadicList[Int]
-) -> None:
-```
+### Alloc
+
+`fn alloc(inout self: Self) -> None:`
 
+ - Description: Allocate or Init The Array.
+
+### Random
+
+`fn random(inout self: Self) -> None:`
 
-sets data from the given buffer and change shape
+ - Description: Randomize The Data if the Array is Allocated.
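
Taken together, the constructors, `alloc`, and `random` documented above support a lazy path (shape first, storage later) and an eager path (allocate and randomize in one step). Here is a minimal sketch of both flows; it is illustrative only and not part of the patch, and the shapes, dtype, and variable names are arbitrary examples:

```mojo
from EasyDel import Array


fn main() raises:
    # Lazy path: the shape-only constructor sets metadata but allocates nothing (Alloc Zero).
    var A: Array[DType.float32] = Array[DType.float32](2, 64, 32)
    A.alloc()   # allocate storage for num_elements() values
    A.random()  # randomize the data; a no-op while the array is unallocated

    # Eager path: passing True as the first argument allocates and randomizes at once.
    let B: Array[DType.float32] = Array[DType.float32](True, 2, 64, 32)
```
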
### Dim diff --git "a/lib/mojo/EasyDel/array/array_module.\360\237\224\245" "b/lib/mojo/EasyDel/array/array_module.\360\237\224\245" index 2539dc483..d9146c923 100644 --- "a/lib/mojo/EasyDel/array/array_module.\360\237\224\245" +++ "b/lib/mojo/EasyDel/array/array_module.\360\237\224\245" @@ -31,6 +31,7 @@ from algorithm.functional import ( ) from runtime.llcl import Runtime, num_cores from memory.memory import memset_zero +from .array_utils import matmul_shape alias dims_average_size = 5 alias debuging = True @@ -200,91 +201,103 @@ struct ArrayShape: struct Array[T: DType]: var data: DTypePointer[T] var array_shape: ArrayShape + var allocated: Int alias nelts: Int = simdwidthof[T]() * 2 + alias ArrayPointer: AnyType = DTypePointer[T] - fn __init__(inout self: Self, array_shape: ArrayShape): + fn __init__(inout self: Self, array_shape: ArrayShape) -> None: + r"""Init Array From ArrayShape(Alloc Zero).""" self.array_shape = array_shape - self.data = DTypePointer[T]().alloc(self.array_shape.num_elements()) - rand[T](self.data, self.array_shape.num_elements()) + self.data = self.ArrayPointer.alloc(0) + self.allocated = 0 + + fn __init__(inout self: Self, A: Self, B: Self) -> None: + r"""Init Array From Two other Arrays A and B For Matmul(Alloc One).""" + self.__init__(matmul_shape[T](A, B)) + self.alloc() fn __init__(inout self: Self, *dim: Int): + r"""Init Array from Int Args(Alloc Zero).""" self.array_shape = ArrayShape(VariadicList(dim)) - self.data = DTypePointer[T]().alloc(self.array_shape.num_elements()) - rand[T](self.data, self.array_shape.num_elements()) - - fn __init__(inout self: Self, vl: VariadicList[Int]): + self.data = self.ArrayPointer.alloc(0) + self.allocated = 0 + + fn __init__(inout self: Self, init: Bool, *dim: Int) -> None: + r"""Init Array from Int Args(Depends on Passed Bool).""" + self.__init__(dim) + if init: + self.alloc() + self.random() + + fn __init__(inout self: Self, vl: VariadicList[Int]) -> None: + r"""Init Array from VariadicList[Int](Alloc Zero).""" self.array_shape = ArrayShape(vl) - self.data = DTypePointer[T]().alloc(self.array_shape.num_elements()) - rand[T](self.data, self.array_shape.num_elements()) + self.data = self.ArrayPointer.alloc(0) + self.allocated = 0 fn __init__( inout self: Self, value: DynamicVector[FloatLiteral], shape: ArrayShape ) -> None: + r"""Init Array from ArrayShape and load data from DynamicVector[FloatLiteral](Alloc One). + """ self.array_shape = shape do_check(len(value) == self.array_shape.num_elements(), "Data Size miss match") - self.data = DTypePointer[T]().alloc(self.array_shape.num_elements()) - + self.data = self.ArrayPointer.alloc(self.array_shape.num_elements()) + self.allocated = 1 for i in range(self.array_shape.num_elements()): self.data.simd_store[1](i, value[i]) fn __init__( inout self: Self, value: VariadicList[FloatLiteral], shape: ArrayShape ) -> None: + r"""Init Array from ArrayShape and load data from VariadicList[FloatLiteral](Alloc One). 
+ """ self.array_shape = shape do_check(len(value) == self.array_shape.num_elements(), "Data Size miss match") - self.data = DTypePointer[T]().alloc(self.array_shape.num_elements()) + self.allocated = 1 + self.data = self.ArrayPointer.alloc(self.array_shape.num_elements()) for i in range(self.array_shape.num_elements()): self.data.simd_store[1](i, value[i]) - fn set_data_from_buffer(inout self: Self, pointer: DTypePointer[T]) -> None: + fn __init__(inout self: Self, pointer: DTypePointer[T], *dim: Int) -> None: self.data = pointer + self.array_shape = ArrayShape(dim) + self.allocated = 1 - # fn set_data_from_buffer( - # inout self: Self, pointer: DTypePointer[T], *dims: Int - # ) -> None: - # self.data = pointer - # var shape = VariadicList[Int](dims) - # do_check( - # len(shape) == self.array_shape.rank(), "Ranking Assertation Failed!" - # ) - # for i in range(len(shape)): - # do_check( - # shape[i] == self.array_shape.dim(i), "Ranking Assertation Failed!" - # ) - - fn set_data_from_buffer( - inout self: Self, pointer: DTypePointer[T], shape: VariadicList[Int] - ) -> None: - self.data = pointer + fn alloc(inout self: Self) -> None: + r"""Allocate or Init The Array.""" + self.data = self.ArrayPointer.alloc(self.array_shape.num_elements()) + self.allocated = 1 - do_check(len(shape) == self.array_shape.rank(), "Ranking Assertation Failed!") - for i in range(len(shape)): - do_check(shape[i] == self.array_shape.dim(i), "Ranking Assertation Failed!") + fn random(inout self: Self) -> None: + r"""Randomize The Data if the Array is Allocated.""" + if self.allocated == 1: + rand[T](self.data, self.array_shape.num_elements()) fn __moveinit__(inout self, owned ext: Self): - self.data = DTypePointer[T].alloc(ext.array_shape.num_elements()) + self.data = ext.data self.array_shape = ext.array_shape - - @parameter - fn _do[_nelts: Int](f: Int): - let dt = ext.data.simd_load[_nelts](0) - self.data.simd_store[_nelts](0, dt) - - vectorize[Self.nelts, _do](ext.array_shape.num_elements()) + self.allocated = ext.allocated fn __copyinit__(inout self, ext: Self): - self.data = DTypePointer[T].alloc(ext.array_shape.num_elements()) - self.array_shape = ext.array_shape + self.allocated = ext.allocated + if self.allocated == 1: + self.data = self.ArrayPointer.alloc(ext.array_shape.num_elements()) + self.array_shape = ext.array_shape - @parameter - fn _do[_nelts: Int](f: Int): - let dt = ext.data.simd_load[_nelts](0) - self.data.simd_store[_nelts](0, dt) + @parameter + fn _do[_nelts: Int](f: Int): + let dt = ext.data.simd_load[_nelts](0) + self.data.simd_store[_nelts](0, dt) - vectorize[Self.nelts, _do](ext.array_shape.num_elements()) + vectorize[Self.nelts, _do](ext.array_shape.num_elements()) + else: + self.data = self.ArrayPointer.alloc(0) + self.array_shape = ext.array_shape fn __del__(owned self): - self.data.free() + if self.allocated == 1: + self.data.free() fn dim(self: Self, i: Int) -> Int: return self.array_shape.dim(i) @@ -380,6 +393,7 @@ struct Array[T: DType]: ) let res = Self(self.array_shape) + res.alloc() let size = self.array_shape.num_elements() let last_dim = self.array_shape[-1] @@ -413,7 +427,7 @@ struct Array[T: DType]: ](self) -> Self: let res = Self(self.array_shape) let size = self.array_shape.num_elements() - + res.alloc() let last_dim = self.array_shape[-1] var dims_rest = size // last_dim @@ -440,6 +454,7 @@ struct Array[T: DType]: ], ](self: Self) -> Self: var res: Self = Self(self.array_shape) + res.alloc() @parameter fn _do(size: Int): @@ -689,6 +704,7 @@ struct Array[T: DType]: "Arrays 
Don't have same size", ) var res = Self(self.array_shape) + res.alloc() let last_dim: Int = res.array_shape.dim(-1) let res_size: Int = res.array_shape._size // last_dim @@ -714,6 +730,7 @@ struct Array[T: DType]: "Arrays Don't have same size", ) var res = Self(self.array_shape) + res.alloc() let last_dim: Int = res.array_shape.dim(-1) let res_size: Int = res.array_shape._size // last_dim @@ -788,9 +805,7 @@ struct Array[T: DType]: fn element_wise[_nelts: Int](j: Int): let a_i = i * self.dim(-1) + j if _nelts < nelts: - ar[0] += ( - self.load[_nelts](a_i) * other.load[_nelts](j) - ).reduce_add() + ar[0] += (self.load[1](a_i) * other.load[_nelts](j)).reduce_add() else: ar += self.load[nelts](a_i) * other.load[nelts](j) @@ -818,47 +833,54 @@ struct Array[T: DType]: res_dims.append(self.dim(i)) res_dims.append(other.dim(-1)) - var result_array = Self(ArrayShape(res_dims)) - result_array.fill(0.0) + var C = Self(ArrayShape(res_dims)) + C.fill(0.0) + + C.fill(0.0) + if self.rank() != other.rank(): + print("Can not performe operation Dimensions won't match") + return C ^ + let C_C: Int = C.dim(-1) + let A_C: Int = self.dim(-1) + let C_R: Int = C.dim(-2) + let C_P: Int = C.dim(-2) * C.dim(-1) + let B_P: Int = other.dim(-2) * other.dim(-1) + let A_P: Int = self.dim(-2) * self.dim(-1) @parameter - fn C_I(y: Int, x: Int) -> Int: - return y * result_array.dim(-1) + x + fn CI(y: Int, x: Int) -> Int: + return y * C.dim(-1) + x @parameter - fn A_I(y: Int, x: Int) -> Int: + fn AI(y: Int, x: Int) -> Int: return y * self.dim(-1) + x @parameter - fn B_I(y: Int, x: Int) -> Int: + fn BI(y: Int, x: Int) -> Int: return y * other.dim(-1) + x - @parameter - fn loop_(i: Int) -> None: - for j in range(self.dim(-1)): - - @parameter - fn _mul[_nelts: Int](k: Int) -> None: - let c_i = C_I(i, k) - let a_i = A_I(i, j) - let b_i = B_I(j, k) - - result_array.store[_nelts]( - c_i, - result_array.load[_nelts](c_i) - + self[a_i] * other.load[_nelts](b_i), - ) + for s in range((C.num_elements() // (C_C * C_R))): + let pad_ci = s * C_P + let pad_ai = s * A_P + let pad_bi = s * B_P - # if debuging: - # print("TRIGGERING : ", c_i, " AS ", result_array[c_i]) - - vectorize[nelts, _mul](result_array.dim(-1)) - - outer_loop_func[loop_](result_array.dim(-2)) - # if debuging: - # for i in range(result_array.num_elements()): - # print("TRIGGERING RES : ", i, " AS ", result_array[i]) - return result_array ^ + @parameter + fn loop_(i: Int) -> None: + for j in range(A_C): + + @parameter + fn _mul[_nelts: Int](k: Int) -> None: + let ci: Int = CI(i, k) + pad_ci + let ai: Int = AI(i, j) + pad_ai + let bi: Int = BI(j, k) + pad_bi + C.store[_nelts]( + ci, C.load[_nelts](ci) + self[ai] * other.load[_nelts](bi) + ) + + vectorize[nelts, _mul](C_C) + + outer_loop_func[loop_](C_R) + return C ^ @always_inline fn matmul[nelts: Int](self, other: Self, rt: Runtime, n_cores: Int) -> Self: diff --git "a/lib/mojo/EasyDel/array/array_utils.\360\237\224\245" "b/lib/mojo/EasyDel/array/array_utils.\360\237\224\245" index b1e4d9e8e..376fea4c7 100644 --- "a/lib/mojo/EasyDel/array/array_utils.\360\237\224\245" +++ "b/lib/mojo/EasyDel/array/array_utils.\360\237\224\245" @@ -28,6 +28,8 @@ fn convert_numpy_to_easydel_array[ fn matmul_2d[nelts: Int, T: DType](inout C: Array[T], A: Array[T], B: Array[T]) -> None: + if C.allocated == 0: + C.alloc() @parameter fn CI(y: Int, x: Int) -> Int: return y * C.dim(-1) + x @@ -60,6 +62,8 @@ fn matmul_2d[nelts: Int, T: DType](inout C: Array[T], A: Array[T], B: Array[T]) fn matmul_single_row[nelts: Int, T: DType](inout C: 
Array[T], A: Array[T], B: Array[T]): if not (A.rank() == 2 and B.rank() == 1 and C.rank() == 1): print("Report Matmul Bug in matmul_single_row") + if C.allocated == 0: + C.alloc() C.fill(0.0) let C_C: Int = C.dim(-1) let A_C: Int = A.dim(-1) @@ -83,7 +87,11 @@ fn matmul_single_row[nelts: Int, T: DType](inout C: Array[T], A: Array[T], B: Ar parallelize[_loop](A.dim(-2)) -fn matmul[nelts: Int, T: DType](inout C: Array[T], A: Array[T], B: Array[T]) raises -> None: +fn matmul[ + nelts: Int, T: DType +](inout C: Array[T], A: Array[T], B: Array[T]) raises -> None: + if C.allocated == 0: + C.alloc() if A.rank() == 2 and B.rank() == 1 and C.rank() == 1: matmul_single_row[nelts, T](C, A, B) return diff --git "a/lib/mojo/EasyDel/models/llama.\360\237\224\245" "b/lib/mojo/EasyDel/models/llama.\360\237\224\245" index 44e0d0406..0c71a2e95 100644 --- "a/lib/mojo/EasyDel/models/llama.\360\237\224\245" +++ "b/lib/mojo/EasyDel/models/llama.\360\237\224\245" @@ -1,5 +1,7 @@ - from ..utilities import FileBuffer +from ..array import Array, ArrayShape + + struct LlamaConfig: var hidden_size: Int var num_attention_heads: Int @@ -39,7 +41,6 @@ struct LlamaConfig: fn __init__(inout self: Self) -> None: self.hidden_size = 512 - self.intermediate_size = self.hidden_size * 4 self.max_position_embeddings = 2048 self.num_attention_heads = 8 self.num_hidden_layers = 8 @@ -72,7 +73,7 @@ struct LlamaConfig: buffer.data.offset(buffer.offset).bitcast[DType.int32]().load(0).to_int() ) buffer.move_offset(4) - + self.vocab_size = ( buffer.data.offset(buffer.offset).bitcast[DType.int32]().load(0).to_int() ) @@ -85,10 +86,24 @@ struct LlamaConfig: self.kv_dims = ( self.num_key_value_heads * self.hidden_size ) // self.num_attention_heads - self.number_rep_kv = self.num_attention_heads // self.num_attention_heads + self.number_rep_kv = self.num_attention_heads // self.num_key_value_heads self.rms_norm_eps = 1e-5 return None + fn print_config(self: Self) -> None: + print("\033[1;36mHidden Size : ", self.hidden_size) + print("Max Position Embeddings : ", self.max_position_embeddings) + print("Num Attention Heads : ", self.num_attention_heads) + print("Num Hidden Layers : ", self.num_hidden_layers) + print("Vocab Size : ", self.vocab_size) + print("RMS Norm Epsilon : ", self.rms_norm_eps) + print("Number Repeat Key Value : ", self.number_rep_kv) + print("Number Key Value Heads : ", self.num_key_value_heads) + print("Intermediate Size : ", self.intermediate_size) + print("HEAD DIMS : ", self.head_dims) + print("KV DIMS : ", self.kv_dims) + print_no_newline("\033[1;0m") + # struct LlamaRunStateF32: # var x: MatrixF32 @@ -141,255 +156,145 @@ struct LlamaConfig: # self.rt = Runtime(num_cores() // 2) -# struct LlamaRunStateBF16: -# var x: MatrixBF16 -# var x_normed: MatrixBF16 -# var x_buffer: MatrixBF16 -# var ffn_w1: MatrixBF16 -# var ffn_w3: MatrixBF16 -# var q: MatrixBF16 -# var k: MatrixBF16 -# var v: MatrixBF16 -# var attn_weights: MatrixBF16 -# var logits: MatrixBF16 -# var key_cache: MatrixBF16 -# var value_cache: MatrixBF16 -# var rt: Runtime - -# fn __init__(inout self, config: LlamaConfig): -# self.x = MatrixBF16(config.hidden_size) -# self.x.alloc_zero() -# self.x_normed = MatrixBF16(config.hidden_size) -# self.x_normed.alloc_zero() -# self.x_buffer = MatrixBF16(config.hidden_size) -# self.x_buffer.alloc_zero() -# self.ffn_w1 = MatrixBF16(config.intermediate_size) -# self.ffn_w1.alloc_zero() -# self.ffn_w3 = MatrixBF16(config.intermediate_size) -# self.ffn_w3.alloc_zero() -# self.q = MatrixBF16(config.hidden_size) -# 
self.q.alloc_zero() -# self.k = MatrixBF16(0, 0) -# self.v = MatrixBF16(0, 0) -# self.attn_weights = MatrixBF16( -# config.max_position_embeddings, config.num_attention_heads -# ) -# self.attn_weights.alloc_zero() -# self.logits = MatrixBF16(config.vocab_size) -# self.logits.alloc_zero() -# self.key_cache = MatrixBF16( -# config.num_hidden_layers, -# config.kv_dims, -# config.max_position_embeddings, -# ) -# self.key_cache.alloc_zero() -# self.value_cache = MatrixBF16( -# config.num_hidden_layers, -# config.kv_dims, -# config.max_position_embeddings, -# ) -# self.value_cache.alloc_zero() -# self.rt = Runtime(num_cores() // 2) - - -# struct LlamaWeightsF32: -# var wte: MatrixF32 -# var fcr: MatrixF32 -# var fci: MatrixF32 -# var input_norm_attn_weigth: MatrixF32 -# var wq: MatrixF32 -# var wk: MatrixF32 -# var wv: MatrixF32 -# var wo: MatrixF32 -# var post_norm_weigth: MatrixF32 -# var w1: MatrixF32 -# var w3: MatrixF32 -# var w2: MatrixF32 -# var final_norm_weight: MatrixF32 -# var lm_head: MatrixF32 +# struct LlamaWeights[T: DType]: +# var wte: Array[T] +# var fcr: Array[T] +# var fci: Array[T] +# var input_norm_attn_weigth: Array[T] +# var wq: Array[T] +# var wk: Array[T] +# var wv: Array[T] +# var wo: Array[T] +# var post_norm_weigth: Array[T] +# var w1: Array[T] +# var w3: Array[T] +# var w2: Array[T] +# var final_norm_weight: Array[T] +# var lm_head: Array[T] # fn __init__( -# inout self, config: LlamaConfig, shared_weights: Bool, inout buf: FileBuffer +# inout self, +# config: LlamaConfig, +# shared_weights: Bool, +# inout buf: FileBuffer, # ) raises: -# self.wte = MatrixF32(config.hidden_size, config.vocab_size) -# self.wte.set_data_from_buffer(buf.read_value_float32(self.wte.size())) +# let size: Int = sizeof[T]() +# self.wte = Array[T](config.hidden_size, config.vocab_size) +# self.wte.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.wte.num_elements(), size) +# ) -# self.input_norm_attn_weigth = MatrixF32( +# self.input_norm_attn_weigth = Array[T]( # config.hidden_size, config.num_hidden_layers # ) # self.input_norm_attn_weigth.set_data_from_buffer( -# buf.read_value_float32(self.input_norm_attn_weigth.size()) +# buf.read_numerical_value_dynamic[T]( +# self.input_norm_attn_weigth.num_elements(), size +# ) # ) -# self.wq = MatrixF32( +# self.wq = Array[T]( # config.num_hidden_layers, # config.hidden_size, # config.hidden_size, # ) -# self.wq.set_data_from_buffer(buf.read_value_float32(self.wq.size())) -# self.wk = MatrixF32( +# self.wq.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.wq.num_elements(), size) +# ) +# self.wk = Array[T]( # config.num_hidden_layers, # config.kv_dims, # config.hidden_size, # ) -# self.wk.set_data_from_buffer(buf.read_value_float32(self.wk.size())) -# self.wv = MatrixF32( +# self.wk.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.wk.num_elements(), size) +# ) +# self.wv = Array[T]( # config.num_hidden_layers, # config.kv_dims, # config.hidden_size, # ) -# self.wv.set_data_from_buffer(buf.read_value_float32(self.wv.size())) -# self.wo = MatrixF32( + +# self.wv.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.wv.num_elements(), size) +# ) + +# self.wo = Array[T]( # config.num_hidden_layers, # config.hidden_size, # config.hidden_size, # ) -# self.wo.set_data_from_buffer(buf.read_value_float32(self.wo.size())) -# self.post_norm_weigth = MatrixF32( -# config.hidden_size, config.num_hidden_layers + +# self.wo.set_data_from_buffer( +# 
buf.read_numerical_value_dynamic[T](self.wo.num_elements(), size) # ) + +# self.post_norm_weigth = Array[T](config.hidden_size, config.num_hidden_layers) # self.post_norm_weigth.set_data_from_buffer( -# buf.read_value_float32(self.post_norm_weigth.size()) +# buf.read_numerical_value_dynamic[T]( +# self.post_norm_weigth.num_elements(), size +# ) # ) -# self.w1 = MatrixF32( + +# self.w1 = Array[T]( # config.num_hidden_layers, # config.hidden_size, # config.intermediate_size, # ) -# self.w1.set_data_from_buffer(buf.read_value_float32(self.w1.size())) -# self.w2 = MatrixF32( -# config.num_hidden_layers, -# config.hidden_size, -# config.intermediate_size, +# self.w1.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.w1.num_elements(), size) # ) -# self.w2.set_data_from_buffer(buf.read_value_float32(self.w2.size())) -# self.w3 = MatrixF32( + +# self.w2 = Array[T]( # config.num_hidden_layers, # config.hidden_size, # config.intermediate_size, # ) -# self.w3.set_data_from_buffer(buf.read_value_float32(self.w3.size())) -# self.final_norm_weight = MatrixF32(config.hidden_size) -# self.final_norm_weight.set_data_from_buffer( -# buf.read_value_float32(self.final_norm_weight.size()) -# ) -# self.fcr = MatrixF32( -# config.max_position_embeddings, -# (config.hidden_size // config.num_attention_heads) // 2, -# ) -# self.fcr.set_data_from_buffer(buf.read_value_float32(self.fcr.size())) -# self.fci = MatrixF32( -# config.max_position_embeddings, -# (config.hidden_size // config.num_attention_heads) // 2, -# ) -# self.fci.set_data_from_buffer(buf.read_value_float32(self.fci.size())) -# self.lm_head = MatrixF32( config.hidden_size,config.vocab_size) -# self.lm_head.set_data_from_buffer( -# self.wte.data if shared_weights else buf.read_value_float32( -# self.lm_head.size() -# ) +# self.w2.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.w2.num_elements(), size) # ) - -# struct LlamaWeightsBF16: -# var wte: MatrixBF16 -# var fcr: MatrixBF16 -# var fci: MatrixBF16 -# var input_norm_attn_weigth: MatrixBF16 -# var wq: MatrixBF16 -# var wk: MatrixBF16 -# var wv: MatrixBF16 -# var wo: MatrixBF16 -# var post_norm_weigth: MatrixBF16 -# var w1: MatrixBF16 -# var w3: MatrixBF16 -# var w2: MatrixBF16 -# var final_norm_weight: MatrixBF16 -# var lm_head: MatrixBF16 - -# fn __init__( -# inout self, config: LlamaConfig, shared_weights: Bool, inout buf: FileBuffer -# ) raises: -# self.wte = MatrixBF16(config.hidden_size, config.vocab_size) -# self.wte.set_data_from_buffer(buf.read_value_bfloat16(self.wte.size())) - -# self.input_norm_attn_weigth = MatrixBF16( -# config.hidden_size, config.num_hidden_layers -# ) -# self.input_norm_attn_weigth.set_data_from_buffer( -# buf.read_value_bfloat16(self.input_norm_attn_weigth.size()) -# ) -# self.wq = MatrixBF16( -# config.num_hidden_layers, -# config.hidden_size, -# config.hidden_size, -# ) -# self.wq.set_data_from_buffer(buf.read_value_bfloat16(self.wq.size())) -# self.wk = MatrixBF16( -# config.num_hidden_layers, -# config.kv_dims, -# config.hidden_size, -# ) -# self.wk.set_data_from_buffer(buf.read_value_bfloat16(self.wk.size())) -# self.wv = MatrixBF16( -# config.num_hidden_layers, -# config.kv_dims, -# config.hidden_size, -# ) -# self.wv.set_data_from_buffer(buf.read_value_bfloat16(self.wv.size())) -# self.wo = MatrixBF16( -# config.num_hidden_layers, -# config.hidden_size, -# config.hidden_size, -# ) -# self.wo.set_data_from_buffer(buf.read_value_bfloat16(self.wo.size())) -# self.post_norm_weigth = MatrixBF16( -# config.hidden_size, 
config.num_hidden_layers -# ) -# self.post_norm_weigth.set_data_from_buffer( -# buf.read_value_bfloat16(self.post_norm_weigth.size()) -# ) -# self.w1 = MatrixBF16( -# config.num_hidden_layers, -# config.hidden_size, -# config.intermediate_size, -# ) -# self.w1.set_data_from_buffer(buf.read_value_bfloat16(self.w1.size())) -# self.w2 = MatrixBF16( +# self.w3 = Array[T]( # config.num_hidden_layers, # config.hidden_size, # config.intermediate_size, # ) -# self.w2.set_data_from_buffer(buf.read_value_bfloat16(self.w2.size())) -# self.w3 = MatrixBF16( -# config.num_hidden_layers, -# config.hidden_size, -# config.intermediate_size, +# self.w3.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.w3.num_elements(), size) # ) -# self.w3.set_data_from_buffer(buf.read_value_bfloat16(self.w3.size())) -# self.final_norm_weight = MatrixBF16(config.hidden_size) + +# self.final_norm_weight = Array[T](config.hidden_size) # self.final_norm_weight.set_data_from_buffer( -# buf.read_value_bfloat16(self.final_norm_weight.size()) +# buf.read_numerical_value_dynamic[T]( +# self.final_norm_weight.num_elements(), size +# ) # ) -# self.fcr = MatrixBF16( + +# self.fcr = Array[T]( # config.max_position_embeddings, # (config.hidden_size // config.num_attention_heads) // 2, # ) -# self.fcr.set_data_from_buffer(buf.read_value_bfloat16(self.fcr.size())) -# self.fci = MatrixBF16( +# self.fcr.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.fcr.num_elements(), size) +# ) + +# self.fci = Array[T]( # config.max_position_embeddings, # (config.hidden_size // config.num_attention_heads) // 2, # ) -# self.fci.set_data_from_buffer(buf.read_value_bfloat16(self.fci.size())) -# self.lm_head = MatrixBF16( config.hidden_size,config.vocab_size) -# self.lm_head.set_data_from_buffer( -# self.wte.data if shared_weights else buf.read_value_bfloat16( -# self.lm_head.size() -# ) +# self.fci.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.fci.num_elements(), size) # ) +# self.lm_head = Array[T](config.hidden_size, config.vocab_size) +# if shared_weights: +# self.lm_head.set_data_from_buffer(self.wte.data) +# else: +# self.lm_head.set_data_from_buffer( +# buf.read_numerical_value_dynamic[T](self.lm_head.num_elements(), size) +# ) # fn llama_forward_fp32[ -# rope_rotation: fn ( +# rope_rotation: fn ( # inout q: DTypePointer[DType.float32], # inout k: DTypePointer[DType.float32], # fcr_row: DTypePointer[DType.float32], @@ -509,118 +414,3 @@ struct LlamaConfig: # matrix_state.set_data_from_buffer(weights.lm_head.data, hidden_size,config.vocab_size) # matmul[nelts](state.logits, state.x, matrix_state, state.rt) - - - -# fn llama_forward_bf16[ -# rope_rotation: fn ( -# inout q: DTypePointer[DType.bfloat16], -# inout k: DTypePointer[DType.bfloat16], -# fcr_row: DTypePointer[DType.bfloat16], -# fci_row: DTypePointer[DType.bfloat16], -# num_attention_heads: Int, -# num_key_values_head: Int, -# head_dims: Int -# ) -> None, -# matmul:fn [nelts:Int]( -# inout C: MatrixBF16, A: MatrixBF16, B: MatrixBF16, _rt: Runtime -# )->None, -# nelts:Int -# ]( -# input_id: Int, -# position: Int, -# config: LlamaConfig, -# inout state: LlamaRunStateBF16, -# weights: LlamaWeightsBF16, -# ) -> None: -# var x = state.x.data -# let hidden_size = config.hidden_size -# let intermediate_size = config.intermediate_size -# let kv_dims = config.kv_dims -# let number_rep_kv = config.number_rep_kv -# let head_dims = config.head_dims - -# var matrix_state = MatrixBF16(0, 0) -# let content_row = weights.wte.data.offset(input_id * 
hidden_size) -# memcpy[DType.bfloat16](x, content_row, config.hidden_size) - -# let freq_cis_real_row = weights.fcr.data.offset(position * head_dims // 2) -# let freq_cis_imag_row = weights.fci.data.offset(position * head_dims // 2) - -# for layer_index in range(config.num_hidden_layers): -# rms_norm[nelts](state.x_normed.data, x, weights.input_norm_attn_weigth.data.offset(layer_index * hidden_size), SIMD[DType.bfloat16,1](config.rms_norm_eps.to_int()) ,hidden_size,state.rt) - -# matrix_state.set_data_from_buffer(weights.wq.data.offset(layer_index * hidden_size * hidden_size), hidden_size,hidden_size) -# matmul[nelts](state.q, state.x_normed, matrix_state, state.rt) - -# let padding = layer_index * config.max_position_embeddings * kv_dims -# state.k.set_data_from_buffer(state.key_cache.data.offset(padding + position * kv_dims), kv_dims,1) -# matrix_state.set_data_from_buffer(weights.wk.data.offset(layer_index * hidden_size * kv_dims), hidden_size,kv_dims) -# matmul[nelts](state.k, state.x_normed, matrix_state, state.rt) - -# state.v.set_data_from_buffer( -# state.value_cache.data.offset(padding + position * kv_dims),kv_dims,1 -# ) -# matrix_state.set_data_from_buffer(weights.wv.data.offset(layer_index * hidden_size * kv_dims), hidden_size,kv_dims) -# matmul[nelts](state.v, state.x_normed, matrix_state, state.rt) - -# rope_rotation(state.q.data,state.k.data, freq_cis_real_row, freq_cis_imag_row, config.num_attention_heads,config.num_key_value_heads,config.head_dims) - -# for head_index in range(config.num_attention_heads): -# let q = state.q.data.offset(head_index * head_dims) - -# var attn_weights = state.attn_weights.data.offset(head_index * config.max_position_embeddings) - -# for current_pos in range(position + 1): -# let k = state.key_cache.data.offset( -# padding + current_pos * kv_dims + (head_index // number_rep_kv) * head_dims -# ) -# var score: SIMD[DType.bfloat16,1] = 0.0 -# for i in range(head_dims): -# score += q.offset(i).load(0) * k.offset(i).load(0) -# score /= math.sqrt[DType.bfloat16, 1](head_dims) - -# attn_weights.offset(current_pos).store(0, score) - -# softmax[nelts](attn_weights, position + 1) - -# let x_normed = state.x_normed.data.offset(head_index * head_dims) -# memset_zero(x_normed, head_dims) -# for current_pos in range(position + 1): -# let v = state.value_cache.data.offset( -# padding + current_pos * kv_dims + (head_index // number_rep_kv) * head_dims -# ) -# let a = attn_weights.offset(current_pos).load(0) -# for i in range(head_dims): -# let xbi = x_normed.offset(i).load(0) + a * v.offset(i).load(0) -# x_normed.offset(i).store(0, xbi) -# matrix_state.set_data_from_buffer(weights.wo.data.offset(layer_index * hidden_size * hidden_size),hidden_size,hidden_size) -# matmul[nelts](state.x_buffer, state.x_normed, matrix_state, state.rt) - -# add_pointers[nelts](x, state.x_buffer.data, hidden_size) - -# rms_norm[nelts](state.x_normed.data, x, weights.post_norm_weigth.data.offset(layer_index * hidden_size), SIMD[DType.bfloat16,1](config.rms_norm_eps.to_int()), hidden_size,state.rt) - -# matrix_state.set_data_from_buffer(weights.w1.data.offset(layer_index * hidden_size * intermediate_size), hidden_size,intermediate_size) -# matmul[nelts](state.ffn_w1, state.x_normed, matrix_state, state.rt) - -# matrix_state.set_data_from_buffer(weights.w3.data.offset(layer_index * hidden_size * intermediate_size), hidden_size,intermediate_size) -# matmul[nelts](state.ffn_w3, state.x_normed, matrix_state, state.rt) - -# silu(state.ffn_w1,intermediate_size,state.rt) - -# @parameter -# 
fn element_wise(ii:Int) -> None: -# state.ffn_w1[ii] = state.ffn_w1[ii] * state.ffn_w3[ii] - -# parallelize[element_wise](state.rt,intermediate_size,state.rt.parallelism_level()) -# matrix_state.set_data_from_buffer(weights.w2.data.offset(layer_index * hidden_size * intermediate_size), intermediate_size,hidden_size) -# matmul[nelts](state.x_normed, state.ffn_w1, matrix_state, state.rt) - -# add_pointers[nelts](x, state.x_normed.data, hidden_size) - -# rms_norm[nelts](x, x, weights.final_norm_weight.data, SIMD[DType.bfloat16,1](config.rms_norm_eps.to_int()), hidden_size,state.rt) - -# matrix_state.set_data_from_buffer(weights.lm_head.data, hidden_size,config.vocab_size) -# matmul[nelts](state.logits, state.x, matrix_state, state.rt) - diff --git "a/lib/mojo/EasyDel/utilities/read.\360\237\224\245" "b/lib/mojo/EasyDel/utilities/read.\360\237\224\245" index 2955b1a4c..ac1441513 100644 --- "a/lib/mojo/EasyDel/utilities/read.\360\237\224\245" +++ "b/lib/mojo/EasyDel/utilities/read.\360\237\224\245" @@ -1,4 +1,5 @@ from python import Python as Py +from ..in_out import File, BufReader struct FileBuffer: @@ -23,21 +24,21 @@ struct FileBuffer: fn read_value_float32( inout self: Self, size: Int ) raises -> DTypePointer[DType.float32]: - let res = self.data.offset(self.offset).bitcast[DType.float32]() + let res = self.data.offset(self.get_offset()).bitcast[DType.float32]() self.move_offset(4 * size) return res - fn read_value_int( - inout self: Self - ) raises -> Int: - let res = self.data.offset(self.offset).bitcast[DType.int32]().load(0).to_int() + fn read_value_int(inout self: Self) raises -> Int: + let res = self.data.offset(self.get_offset()).bitcast[DType.int32]().load( + 0 + ).to_int() self.move_offset(4) return res fn read_value_float16( inout self: Self, size: Int ) raises -> DTypePointer[DType.float16]: - let res = self.data.offset(self.offset).bitcast[DType.float16]() + let res = self.data.offset(self.get_offset()).bitcast[DType.float16]() self.move_offset(sizeof[DType.float16]() * size) return res @@ -45,7 +46,7 @@ struct FileBuffer: fn read_value_bfloat16( inout self: Self, size: Int ) raises -> DTypePointer[DType.bfloat16]: - let res = self.data.offset(self.offset).bitcast[DType.bfloat16]() + let res = self.data.offset(self.get_offset()).bitcast[DType.bfloat16]() self.move_offset(sizeof[DType.bfloat16]() * size) return res @@ -53,7 +54,7 @@ struct FileBuffer: fn read_value_uint8( inout self: Self, size: Int ) raises -> DTypePointer[DType.uint8]: - let res = self.data.offset(self.offset).bitcast[DType.uint8]() + let res = self.data.offset(self.get_offset()).bitcast[DType.uint8]() self.move_offset(sizeof[DType.uint8]() * size) return res @@ -61,7 +62,7 @@ struct FileBuffer: fn read_value_uint16( inout self: Self, size: Int ) raises -> DTypePointer[DType.uint16]: - let res = self.data.offset(self.offset).bitcast[DType.uint16]() + let res = self.data.offset(self.get_offset()).bitcast[DType.uint16]() self.move_offset(sizeof[DType.uint16]() * size) return res @@ -69,7 +70,7 @@ struct FileBuffer: fn read_value_uint32( inout self: Self, size: Int ) raises -> DTypePointer[DType.uint32]: - let res = self.data.offset(self.offset).bitcast[DType.uint32]() + let res = self.data.offset(self.get_offset()).bitcast[DType.uint32]() self.move_offset(sizeof[DType.uint32]() * size) return res @@ -77,11 +78,19 @@ struct FileBuffer: fn read_numerical_value_dynamic[ T: DType ](inout self: Self, size: Int) raises -> DTypePointer[T]: - let res = self.data.offset(self.offset).bitcast[T]() + let res = 
self.data.offset(self.get_offset()).bitcast[T]() self.move_offset(sizeof[T]() * size) return res + fn read_numerical_value_dynamic[ + T: DType + ](inout self: Self, size: Int, fs: Int) raises -> DTypePointer[T]: + let res = self.data.offset(self.get_offset()).bitcast[T]() + + self.move_offset(fs * size) + return res + fn get_offset(self: Self) -> Int: return self.offset if self.offset < self.size else self.size @@ -95,7 +104,6 @@ fn read_numerical_value[T: DType](inout buffer: FileBuffer) raises -> SIMD[T, 1] fn read_string_value( inout buffer: FileBuffer, string_length: Int ) raises -> Pointer[UInt8]: - let str = Pointer[UInt8].alloc(string_length + 1) for i in range(string_length): diff --git a/lib/mojo/README.md b/lib/mojo/README.md index 1c9146e9f..eb49ac7b6 100644 --- a/lib/mojo/README.md +++ b/lib/mojo/README.md @@ -4,6 +4,11 @@ EasyDel Mojo differs from EasyDel in Python in significant ways. In Python, you There are several reasons why building packages in Mojo is more efficient than importing them from Python. Firstly, when you import packages from Python, you incur the overhead of translating and processing the Python code into Mojo code, which takes time. Secondly, the Python code may not be optimized for the Mojo runtime environment, leading to slower performance. Lastly, building packages directly in Mojo allows you to design and optimize them explicitly for the Mojo runtime environment, resulting in faster and more efficient code. With Mojo's built-in array capabilities that are 35000x faster than Python, it's time to take your coding to the next level. +## Docs + +* _EasyDel MojoπŸ”₯_ : + * [README TreeπŸ”₯](https://erfanzar.github.io/EasyDeL/lib/mojo) + ## Array API Array API can be used just like Numpy Arrays For example @@ -75,50 +80,50 @@ fn main() raises: #### Math Supported Operation For Arrays -| Operation | Array[DT.F64] | Array[DT.F32] | Array[DT.F16] | Array[DT.BF16] | -| --------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | -| `Sqrt` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Sin` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Cos` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Tanh` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Tan` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Log` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Log2` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | 
[βœ…](https://emojipedia.org/check-mark-button) | -| `Atan` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Exp` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Exp2` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Pow` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Log10` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Log1p` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Logb` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Asin` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Acos` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | -| `Acosh` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| Operation | Array[DT.F64] | Array[DT.F32] | Array[DT.F16] | Array[DT.BF16] | +| --------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `Sqrt` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Sin` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Cos` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Tanh` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Tan` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Log` | 
[βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Log2` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Atan` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Exp` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Exp2` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Pow` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Log10` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Log1p` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Logb` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Asin` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Acos` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | +| `Acosh` | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | [βœ…](https://emojipedia.org/check-mark-button) | #### Supported Operations Between Arrays -| Operation Sign | Func | Supported Array TO Array | Supported Array TO SIMD | -| -------------- | ------------------- | ------------------------------------------- | ----------------------- | -| `+` | `__add__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `-` | `__sub__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `*` | `__mul__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `/` | `__truediv__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `//` | `__floordiv__()` | ❌ Not Yet | ❌ Not Yet | -| `@` | `__matmul__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `%` | `__mod__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `**` | `__pow__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `+=` | `__iadd__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet | -| `-=` | `__isub__()` | 
[βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `*=` | `__imul__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `/=` | `__itruediv__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `//=` | `__ifloordiv__()` | ❌ Not Yet | ❌ Not Yet |
+| `**=` | `__ipow__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `==` | `__eq__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `!=` | `__ne__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `<` | `__lt__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `>` | `__gt__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `[]` | `__getitem__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
+| `[]` | `__setitem__()` | [βœ…](https://emojipedia.org/check-mark-button) | ❌ Not Yet |
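
As a closing illustration of the operator tables above, here is a minimal sketch of how the supported dunder methods compose in user code. It is illustrative only: the shapes and names are not from this patch, and the elementwise operators are assumed to require matching shapes:

```mojo
from EasyDel import Array


fn main() raises:
    # True allocates and randomizes; A and B share the shape (2, 8).
    let A: Array[DType.float32] = Array[DType.float32](True, 2, 8)
    let B: Array[DType.float32] = Array[DType.float32](True, 2, 8)

    var C = A + B       # __add__: elementwise sum
    C += B              # __iadd__: in-place elementwise sum
    let x = C[0]        # __getitem__: element access by linear index
    C[0] = x * 2.0      # __setitem__: element assignment

    # __matmul__ follows matrix rules, so the inner dimensions must agree.
    let M: Array[DType.float32] = Array[DType.float32](True, 8, 2)
    let P = A @ M       # (2, 8) @ (8, 2) -> (2, 2)
```

### Road Map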