kernel
danieldk committed
Commit b2bfe4f · 1 Parent(s): c558d47
Files changed (44)
  1. build/torch25-cxx11-cu118-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} +2 -2
  2. build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
  3. build/torch25-cxx11-cu118-x86_64-linux/moe/layers.py +2 -0
  4. build/torch25-cxx11-cu121-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} +2 -2
  5. build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py +3 -3
  6. build/torch25-cxx11-cu121-x86_64-linux/moe/layers.py +2 -0
  7. build/torch25-cxx11-cu124-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} +2 -2
  8. build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
  9. build/torch25-cxx11-cu124-x86_64-linux/moe/layers.py +2 -0
  10. build/torch25-cxx98-cu118-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} +2 -2
  11. build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
  12. build/torch25-cxx98-cu118-x86_64-linux/moe/layers.py +2 -0
  13. build/torch25-cxx98-cu121-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  14. build/torch25-cxx98-cu121-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  15. build/torch25-cxx98-cu121-x86_64-linux/moe/_ops.py +3 -3
  16. build/torch25-cxx98-cu121-x86_64-linux/moe/layers.py +2 -0
  17. build/torch25-cxx98-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  18. build/torch25-cxx98-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  19. build/torch25-cxx98-cu124-x86_64-linux/moe/_ops.py +3 -3
  20. build/torch25-cxx98-cu124-x86_64-linux/moe/layers.py +2 -0
  21. build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  22. build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  23. build/torch26-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
  24. build/torch26-cxx11-cu118-x86_64-linux/moe/layers.py +2 -0
  25. build/torch26-cxx11-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  26. build/torch26-cxx11-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  27. build/torch26-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
  28. build/torch26-cxx11-cu124-x86_64-linux/moe/layers.py +2 -0
  29. build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  30. build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  31. build/torch26-cxx11-cu126-x86_64-linux/moe/_ops.py +3 -3
  32. build/torch26-cxx11-cu126-x86_64-linux/moe/layers.py +2 -0
  33. build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  34. build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  35. build/torch26-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
  36. build/torch26-cxx98-cu118-x86_64-linux/moe/layers.py +2 -0
  37. build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  38. build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  39. build/torch26-cxx98-cu124-x86_64-linux/moe/_ops.py +3 -3
  40. build/torch26-cxx98-cu124-x86_64-linux/moe/layers.py +2 -0
  41. build/torch26-cxx98-cu126-x86_64-linux/moe/_moe_2218ad7.abi3.so +0 -3
  42. build/torch26-cxx98-cu126-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so +3 -0
  43. build/torch26-cxx98-cu126-x86_64-linux/moe/_ops.py +3 -3
  44. build/torch26-cxx98-cu126-x86_64-linux/moe/layers.py +2 -0
build/torch25-cxx11-cu118-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49dc6c1d936b3dc6c483a4ef5d581c5d2f08f50f6ea2ffcdbfecdf0b719c3410
- size 87056328
+ oid sha256:17cf96e50dd9d5632a7b5959037c5d11e79fae301989ee6315bc8e3d4d545bde
+ size 87056376
build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx11-cu118-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
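Beyond the rebuild itself, the one functional change is the new has_backward = False class attribute on Llama4TextMoe, declaring that this kernel layer provides a forward pass only, presumably so downstream tooling can avoid substituting it during training. A minimal sketch of how a caller might consume the flag; the helper below is illustrative and not part of this repository:

import torch.nn as nn

def pick_moe_impl(
    kernel_cls: type[nn.Module], reference_cls: type[nn.Module], training: bool
) -> type[nn.Module]:
    # Fall back to the reference module when gradients are required and the
    # kernel layer advertises has_backward = False.
    if training and not getattr(kernel_cls, "has_backward", True):
        return reference_cls
    return kernel_cls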
build/torch25-cxx11-cu121-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:12bb26a0a9a47039bbcbf2c5fda7c068211cb711827b0e0e0d98b2fe99ed3b54
- size 87254968
+ oid sha256:95186380b2815d16e6b8d5cc2d87a93d45dadf31ac197028e37b7d6f65680cf1
+ size 87250920
build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch25-cxx11-cu124-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca9a24c28dab4109a13549ee7ce379b36d950930b8bd106669188262863f3795
- size 86965608
+ oid sha256:53bbb3f5ee8f5cbc0d640e924797f71f2c5b2767ec044470d6200fd154c7887b
+ size 86965656
build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch25-cxx98-cu118-x86_64-linux/moe/{_moe_2218ad7.abi3.so → _moe_c558d47_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d65d3a08c44b65a44d2c58566aa7e26e85d0d949be71096e09f7ad73d0b5e040
- size 87048408
+ oid sha256:d0eedac7fe7e2bcdace617e68444affa54523ae20cf620ad92959476f93f9e9c
+ size 87052576
build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch25-cxx98-cu121-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d2d4157287a3e7979780f23a709eba01e787186bc32a5e56c0620b5429e9cfd3
- size 87243240
build/torch25-cxx98-cu121-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:004cbfe65028ad7b4dd0c2bcffe16bbcb8fe94e3d3771b466808dbd1996fde12
+ size 87247384
build/torch25-cxx98-cu121-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch25-cxx98-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:122544181246b179a772eb07c9e01c8df6b3025c20b333c566d0e84bfd7bea2d
- size 86953880
build/torch25-cxx98-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3785d7743d33c834d0a3ac99dcb8add3a951314290dd9f7d4eb87045c7424f90
+ size 86958024
build/torch25-cxx98-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49e17eb28438bddf98e314893cf262b807d64ee03850b46abe4d0bf6151f62b6
- size 87060352
build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc2df0cc4b652b7c1a2a34191bedc3a3190cac9e74b44403ce3f0c763bdf072
+ size 87060400
build/torch26-cxx11-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx11-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f804164f561c9b46f3b997a6d13552ca4d704c43484b5cd8d14682b4450ed472
- size 86965664
build/torch26-cxx11-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:511359ec7e5a3c4ea2beda0a31b005a0210aadf3d2247b87e990eefa0f8ff8ba
+ size 86965712
build/torch26-cxx11-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1102bf615b2d2f7c320ac73eed63b982e969683ac72c958080dddb87166fa595
- size 87432960
build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b546237970e07f6125675971da472a7cacfaa81384f6ddfa4eb36f4da644c0fc
+ size 87424816
build/torch26-cxx11-cu126-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9e739bb546d3d1730fa7696fbd767fd588286dec369f1b7551edd1ec481df96f
- size 87044288
build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:889767dfddb5c30f49394395f34b6e25c2f097b32826e02bc73e7381a8ef8b24
+ size 87048456
build/torch26-cxx98-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:bcb950d2e7196ad22cad926749b7e0e06e5454f0a732755b72f0b8dd456529c6
- size 86953856
build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8b46694ef9faa8dc3bc01413602d4c5a7ce10fdd7e34c6773e778b7917a9066
+ size 86949808
build/torch26-cxx98-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module
build/torch26-cxx98-cu126-x86_64-linux/moe/_moe_2218ad7.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fe5c605f1da902aebc1d7ce0355b649fcfcc44aed0023fdc87974f3d56273897
- size 87417064
build/torch26-cxx98-cu126-x86_64-linux/moe/_moe_c558d47_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6023b925f8f17a3e8dca1f673bdb6348c661e94423f86114f14fef1bf5f69947
+ size 87417112
build/torch26-cxx98-cu126-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
  import torch
- from . import _moe_2218ad7
- ops = torch.ops._moe_2218ad7
+ from . import _moe_c558d47_dirty
+ ops = torch.ops._moe_c558d47_dirty
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_moe_2218ad7::{op_name}"
+     return f"_moe_c558d47_dirty::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/moe/layers.py CHANGED
@@ -25,6 +25,8 @@ def _fix_llama4_experts(hidden_states: torch.Tensor, experts: nn.Module):
 
 
  class Llama4TextMoe(nn.Module):
+     has_backward = False
+
      experts: nn.Module
      router: nn.Linear
      shared_expert: nn.Module