medmekk (HF Staff) committed
Commit 8b8d759 · 1 Parent(s): 317612c

new builds
Files changed (43)
  1. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  2. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  3. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  4. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  5. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  6. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} +1 -1
  7. build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
  8. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  9. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  10. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  11. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  12. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  13. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} +2 -2
  14. build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
  15. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  16. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  17. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  18. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  19. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  20. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} +1 -1
  21. build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
  22. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  23. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  24. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  25. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  26. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  27. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} +2 -2
  28. build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
  29. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  30. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  31. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  32. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  33. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  34. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} +1 -1
  35. build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
  36. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__init__.py +7 -15
  37. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc +0 -0
  38. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc +0 -0
  39. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc +0 -0
  40. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_ops.py +3 -3
  41. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_rmsnorm_kernel_20250918082933.abi3.so +3 -0
  42. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_rmsnorm_kernel_538355f_dirty.abi3.so +0 -3
  43. build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/layers.py +15 -0
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
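The package's public surface is now a plain function plus a `layers` submodule, instead of a module-level `nn.Module`. A minimal usage sketch of the new entry point, assuming the package is importable as `rmsnorm_kernel` and a CUDA device is available (the shapes and dtypes below are illustrative assumptions, not taken from the commit):

    import torch
    import rmsnorm_kernel

    # Illustrative shapes; RMSNorm is applied over the trailing hidden dimension.
    hidden = torch.randn(4, 128, device="cuda", dtype=torch.float16)
    weight = torch.ones(128, device="cuda", dtype=torch.float16)

    out = rmsnorm_kernel.rmsnorm_forward(hidden, weight)

Note that the new wrapper drops the `bias`, `residual`, `eps`, dropout, and prenorm arguments the old `LlamaRMSNorm.forward` passed; the rebuilt op takes only the input tensor and the weight.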
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f5117ccb9a81d1c1b5330385a72dd2d17eadd56c2f87d584d5b7259d02715cb
+oid sha256:dab5f4cfde265a04ba3805ecc82b4f4572e50d3a50e4a4d2aef1b8ccb9d57ba4
 size 2111512
build/torch27-cxx11-cu118-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )
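`layers.LlamaRMSNorm` is written for layer substitution: it declares `weight` and `variance_epsilon` as bare annotations and defines no `__init__`, on the expectation that those attributes are supplied by the module it replaces (e.g. through the Hugging Face `kernels` layer-mapping mechanism). A standalone sketch therefore has to attach them by hand; this wiring is an assumption for illustration, not part of the commit:

    import torch
    from rmsnorm_kernel import layers

    norm = layers.LlamaRMSNorm()
    norm.weight = torch.nn.Parameter(
        torch.ones(128, device="cuda", dtype=torch.float16)
    )
    norm.variance_epsilon = 1e-6  # kept for interface parity; the two-argument op no longer receives it

    out = norm(torch.randn(2, 128, device="cuda", dtype=torch.float16))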
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bced7bf4cd62415785bb30e3808e907246341d4e63572e70f22397258a022a41
-size 2250040
+oid sha256:1bcf1972c75c6fbb72321d2498cd67c18fc640be087f368fbdc1d0829e5f6382
+size 2250048
build/torch27-cxx11-cu126-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b1eae77e817b754c22ccf4828f0f022866b97c4815059edc0f2a7ca13f5a82f
+oid sha256:1baa27ab1648e5ca308718ce3c845bbfe6106ced438dce9c4bb6881264ed6354
 size 2506984
build/torch27-cxx11-cu128-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d17e952ae2d5df0c7f4357b8bfbdc485c06750274dbadeda9e143272f3656332
-size 2198136
+oid sha256:965341541164ead5c9220b980a8bfcdadea6b755f64d3a3510ef3a112ecb3961
+size 2198144
build/torch28-cxx11-cu126-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/{_rmsnorm_kernel_538355f_dirty.abi3.so → _rmsnorm_kernel_20250918082933.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff6d1ec9ceead35e32eca260b38a3828167e32f5d911f50cc9752533f81b5d40
+oid sha256:bf1fe9ac30684982254023f11727a267fc62778660dca7f28b7cd4889815f8b1
 size 2446200
build/torch28-cxx11-cu128-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__init__.py CHANGED
@@ -1,21 +1,13 @@
 import torch
-import torch.nn as nn
 
 from ._ops import ops
 
+from . import layers
 
-class LlamaRMSNorm(nn.Module):
-    weight: torch.Tensor
-    variance_epsilon: float
+def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
+    return ops.rmsnorm_forward(x, weight)
 
-    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-        return ops.rmsnorm_forward(
-            hidden_states,
-            self.weight,
-            bias=None,
-            residual=None,
-            eps=self.variance_epsilon,
-            dropout_p=0.0,
-            prenorm=False,
-            residual_in_fp32=False,
-        )
+__all__ = [
+    "layers",
+    "rmsnorm_forward",
+]
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/__pycache__/layers.cpython-313.pyc ADDED
Binary file (995 Bytes)
 
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_kernel_538355f_dirty
-ops = torch.ops._rmsnorm_kernel_538355f_dirty
+from . import _rmsnorm_kernel_20250918082933
+ops = torch.ops._rmsnorm_kernel_20250918082933
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_kernel_538355f_dirty::{op_name}"
+    return f"_rmsnorm_kernel_20250918082933::{op_name}"
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_rmsnorm_kernel_20250918082933.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30f0e8d4fcef2e3dd45c220f58b03a0c5169a2b0971667caf1814812cb3b3038
+size 2458904
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/_rmsnorm_kernel_538355f_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:92b07efad367bee6cbd2371081bbfdf9a53d4fe41b9ebef76250c6a36dba8ad3
-size 2458896
build/torch28-cxx11-cu129-x86_64-linux/rmsnorm_kernel/layers.py ADDED
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class LlamaRMSNorm(nn.Module):
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        return ops.rmsnorm_forward(
+            hidden_states,
+            self.weight,
+        )