Fix torch.hub to not depend on libnat
Summary: Pull Request resolved: fairinternal/fairseq-py#878

Differential Revision: D17661768

Pulled By: myleott

fbshipit-source-id: 1e4c5f09eb14c40d491ca2459fd2adb8382fb6d2
Myle Ott authored and facebook-github-bot committed Sep 30, 2019
1 parent 1351972 commit acb6fba
Showing 2 changed files with 30 additions and 2 deletions.
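
Why this fixes torch.hub: resolving a fairseq hub entry point imports fairseq's model modules, so a module-level `from fairseq import libnat` made every hub load fail whenever the libnat C++ extension was not compiled, even for models that never use it. The commit defers the import to the functions that actually need libnat. A minimal sketch of the pattern, with `mypackage.some_ext` standing in for a hypothetical optional compiled extension (names are illustrative, not from this commit):

    def edit_distance_targets(a, b):
        # Import at call time rather than at module-import time, so merely
        # importing this module (e.g. via torch.hub) never requires the
        # extension to be built.
        try:
            from mypackage import some_ext  # hypothetical compiled extension
        except ImportError as e:
            import sys
            sys.stderr.write('ERROR: missing some_ext. run `pip install --editable .`\n')
            raise e
        return some_ext.compute(a, b)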
9 changes: 8 additions & 1 deletion fairseq/models/insertion_transformer.py
@@ -6,7 +6,7 @@
 import numpy as np
 import torch
 import torch.nn.functional as F
-from fairseq import libnat
+
 from fairseq.models import register_model, register_model_architecture
 from fairseq.models.levenshtein_transformer import (
     LevenshteinTransformerDecoder,
@@ -51,6 +51,13 @@ def compute_score_full(self, L, tau):
 
 
 def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
+    try:
+        from fairseq import libnat
+    except ImportError as e:
+        import sys
+        sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
+        raise e
+
     B = in_tokens.size(0)
     T = in_tokens.size(1)
     V = vocab_size
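
With the import deferred here (and below in levenshtein_transformer.py), loading a non-NAT model through torch.hub no longer touches libnat at all. A usage sketch; the model name and keyword arguments follow fairseq's hub examples of the time and are assumptions, not part of this commit:

    import torch

    # Loading a standard translation model no longer requires libnat to be built.
    # Model name and kwargs are taken from fairseq's hub docs; treat them as
    # assumptions rather than part of this commit.
    en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
                           tokenizer='moses', bpe='subword_nmt')
    print(en2de.translate('Hello world!'))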
23 changes: 22 additions & 1 deletion fairseq/models/levenshtein_transformer.py
@@ -5,7 +5,7 @@
 
 import torch
 import torch.nn.functional as F
-from fairseq import libnat
+
 from fairseq.models import register_model, register_model_architecture
 from fairseq.models.model_utils import fill_tensors as _fill, skip_tensors as _skip
 from fairseq.models.transformer import (
@@ -18,6 +18,13 @@
 
 
 def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
+    try:
+        from fairseq import libnat
+    except ImportError as e:
+        import sys
+        sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
+        raise e
+
     in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
 
     with torch.cuda.device_of(in_tokens):
@@ -60,6 +67,13 @@ def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
 
 
 def _get_del_targets(in_tokens, out_tokens, padding_idx):
+    try:
+        from fairseq import libnat
+    except ImportError as e:
+        import sys
+        sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
+        raise e
+
     out_seq_len = out_tokens.size(1)
 
     with torch.cuda.device_of(in_tokens):
@@ -86,6 +100,13 @@ def _get_del_targets(in_tokens, out_tokens, padding_idx):
 
 
 def _get_del_ins_targets(in_tokens, out_tokens, padding_idx):
+    try:
+        from fairseq import libnat
+    except ImportError as e:
+        import sys
+        sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
+        raise e
+
     in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
 
     with torch.cuda.device_of(in_tokens):
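
The same try/except guard now appears in four functions across the two files. A possible follow-up, not part of this commit (`_import_libnat` is a hypothetical helper), would hoist it into one place:

    def _import_libnat():
        # Centralize the guarded import; callers get the module or a clear
        # error telling them to build the extension.
        try:
            from fairseq import libnat
        except ImportError as e:
            import sys
            sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
            raise e
        return libnat

Each target function would then begin with `libnat = _import_libnat()`.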
