This is the old home of fastai v2. The new home is fastai/fastai.
Please head to the new repo. This old repo has been archived.
Temporary home for fastai v2 while it's being developed
Home Page: https://dev.fast.ai
License: Apache License 2.0
Calling databunch() on a DataSource leads to an AttributeError.
I did a fresh install from github of fastai2 and fastcore.
I reduced the code to the bare minimum required for execution:
from fastai2.basics import *
DataSource([0]*1000).databunch()
which leads to the following error:
AttributeError Traceback (most recent call last)
in <module>
----> 1 DataSource([0]*1000).databunch()
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in databunch(self, bs, val_bs, shuffle_train, n, path, dl_type, dl_kwargs, **kwargs)
153 if dl_type is None: dl_type = self._dl_type
154 dls = [dl_type(self.subset(i), bs=b, shuffle=s, drop_last=s, n=n if i==0 else None, **kwargs, **dk)
--> 155 for i,(b,s,dk) in enumerate(zip(bss,shuffles,dl_kwargs))]
156 return DataBunch(*dls, path=path)
157
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in <listcomp>(.0)
153 if dl_type is None: dl_type = self._dl_type
154 dls = [dl_type(self.subset(i), bs=b, shuffle=s, drop_last=s, n=n if i==0 else None, **kwargs, **dk)
--> 155 for i,(b,s,dk) in enumerate(zip(bss,shuffles,dl_kwargs))]
156 return DataBunch(*dls, path=path)
157
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in __init__(self, dataset, bs, shuffle, num_workers, **kwargs)
37 for nm in _batch_tfms:
38 kwargs[nm] = Pipeline(kwargs.get(nm,None), as_item=(nm=='before_batch'))
---> 39 super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
40 for nm in _batch_tfms: kwargs[nm].setup(self)
41
~/miniconda3/lib/python3.7/site-packages/fastcore/foundation.py in _init(self, *args, **kwargs)
148 if isinstance(arg,MethodType): arg = MethodType(arg.func, self)
149 setattr(self, k, arg)
--> 150 old_init(self, *args, **kwargs)
151 functools.update_wrapper(_init, old_init)
152 cls.__init__ = use_kwargs(cls._methods)(_init)
~/miniconda3/lib/python3.7/site-packages/fastai2/data/load.py in __init__(self, dataset, bs, num_workers, pin_memory, timeout, batch_size, shuffle, drop_last, indexed, n, device, **kwargs)
73 try: n = len(dataset)
74 except TypeError: pass
---> 75 store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
76 self.rng,self.nw,self.offs = random.Random(),1,0
77 self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
~/miniconda3/lib/python3.7/site-packages/fastcore/utils.py in store_attr(self, nms)
74 "Store params named in comma-separated nms
from calling context into attrs in self
"
75 mod = inspect.currentframe().f_back.f_locals
---> 76 for n in re.split(', *', nms): setattr(self,n,mod[n])
77
78 #Cell
AttributeError: can't set attribute
When using TfmdLists, WandbCallback returns an error at this line. The reason is that self.dls.valid_ds is a TfmdLists, so it has no tls attribute (unlike Datasets, which is expected here).
At this step, we are trying to create a dataloader with specified idxs for future inference.
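A minimal sketch of the kind of guard that would avoid the crash (my assumption, not the library's fix; valid_ds follows the callback's code):

valid_ds = self.dls.valid_ds
# Datasets exposes its transformed lists via `tls`; a bare TfmdLists is itself the list (assumption)
tls = getattr(valid_ds, 'tls', [valid_ds])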
On another note, a few other issues are related to this callback: valid_dl may have a different batch size than the number of predictions (which defaults to 36; maybe we should get the batch size from the fit loop), so we would have to make sure we either run it a few times (if bs < n_preds) or check that we have all the results.

In the following lines in lesson2-download.ipynb (in nbs):
print(c)
file = f'urls_{c}.csv'
download_images(path/file, path/c, max_pics=200)
I believe this should have the file as the 2nd argument and the folder as the 1st; with that order it works in my Google Colab using current fastai2.
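If that reading is right, the corrected call would be (hedged; only the argument order is changed):

download_images(path/c, path/file, max_pics=200)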
Another question: how do I find the function download_images on GitHub?
Hello,
I have a pytorch dataset:
x_t = torch.Tensor(np.ndarray([16,128,128,3]))
my_dataset = torch.utils.data.TensorDataset(x_t)
my_dloader = torch.utils.data.DataLoader(my_dataset)
img_dblock = DataBlock(blocks=(ImageBlock))
fa_dl = DataLoader(my_dloader)
img_dblock.databunch(fa_dl)
But the last line gives the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-133-994da2df3f53> in <module>
----> 1 img_dblock.databunch(fa_dl)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/block.py in databunch(self, source, path, type_tfms, item_tfms, batch_tfms, **kwargs)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
---> 82 dsrc = self.datasource(source, type_tfms=type_tfms)
83 item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
84 batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/block.py in datasource(self, source, type_tfms)
77 type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
78 lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
---> 79 return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/core.py in __init__(self, items, tfms, tls, n_inp, dl_type, **kwargs)
233 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
234 super().__init__(dl_type=dl_type)
--> 235 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
236 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
237
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/core.py in <listcomp>(.0)
233 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
234 super().__init__(dl_type=dl_type)
--> 235 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
236 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
237
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/core.py in __init__(self, items, tfms, use_list, do_setup, as_item, split_idx, train_setup, splits, types)
169 splits=None, types=None):
170 super().__init__(items, use_list=use_list)
--> 171 self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
172 if isinstance(tfms,TfmdList): tfms = tfms.tfms
173 if isinstance(tfms,Pipeline): do_setup=False
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
360 else f.format if isinstance(f,str)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
364 def filter(self, f, negate=False, **kwargs):
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
313 @property
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
304 if items is None: items = []
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
308 if is_coll(match): match = len(match)
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _listify(o)
240 if isinstance(o, list): return o
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
244
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(self, *args, **kwargs)
206 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
210 # Cell
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in mask2idxs(mask)
256 if len(mask)==0: return []
257 it = mask[0]
--> 258 if hasattr(it,'item'): it = it.item()
259 if isinstance(it,(bool,NoneType,np.bool_)): return [i for i,m in enumerate(mask) if m]
260 return [int(i) for i in mask]
ValueError: only one element tensors can be converted to Python scalars
I can get e.g. the examples running just fine. But I think this may be a bug, because at no point am I defining any masks. It is totally possible I am doing something wrong; the data model is still not 100% clear to me.
The only other way I can think of converting a pytorch dataloader into a fastai2 one is like this:
tfa_dl = TfmdDL(fa_dl)
tfa_dl.one_batch()
But that also raises an error on the last line:
TypeError Traceback (most recent call last)
<ipython-input-136-cfc6f26b1fa6> in <module>
----> 1 tfa_dl.one_batch()
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/load.py in one_batch(self)
128 def one_batch(self):
129 if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
--> 130 with self.fake_l.no_multiproc(): res = first(self)
131 if hasattr(self, 'it'): delattr(self, 'it')
132 return res
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/utils.py in first(x)
172 def first(x):
173 "First element of `x`, or None if missing"
--> 174 try: return next(iter(x))
175 except StopIteration: return None
176
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/load.py in __iter__(self)
96 self.before_iter()
97 for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
---> 98 if self.device is not None: b = to_device(b, self.device)
99 yield self.after_batch(b)
100 self.after_iter()
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/core.py in device(self)
90 def device(self):
91 if defaults.use_cuda==False: return 'cpu'
---> 92 if not getattr(self, '_device', None): self._one_pass()
93 return self._device
94
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/core.py in _one_pass(self)
41
42 def _one_pass(self):
---> 43 its = self.after_batch(self.do_batch([self.do_item(0)]))
44 self._device = find_device(its)
45 self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/load.py in do_item(self, s)
117 def prebatched(self): return self.bs is None
118 def do_item(self, s):
--> 119 try: return self.after_item(self.create_item(s))
120 except SkipItemException: return None
121 def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/data/load.py in create_item(self, s)
123 def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
124 def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
--> 125 def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
126 def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
127 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
TypeError: 'DataLoader' object does not support indexing
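A hedged workaround based on that error: fastai2's DataLoader indexes its dataset (self.dataset[s] in create_item), so wrapping the indexable TensorDataset rather than the torch DataLoader should at least avoid the indexing failure. A sketch using the objects from the snippet above:

tfa_dl = TfmdDL(my_dataset, bs=4)  # wrap the dataset, not the torch DataLoader
b = tfa_dl.one_batch()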
When running through the camvid example and then trying to use the classifier as a deployed inference model, I run into the following issue:
To reproduce:
Option 1
1a. Just put learn.save('camvid-v2') at the end of the notebook.
2a. In another notebook, run:
learner = load_learner('/home/jakub/.fastai/data/camvid/models/camvid-v2.pth')
In that case the error is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-444a84ec5d8a> in <module>
----> 1 learner = load_learner('/home/jakub/.fastai/data/camvid/models/camvid-v2.pth')
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in load_learner(fname, cpu)
590 res = torch.load(fname, map_location='cpu' if cpu else None)
591 if hasattr(res, 'to_fp32'): res = res.to_fp32()
--> 592 if cpu: res.dls.cpu()
593 return res
594
AttributeError: 'dict' object has no attribute 'dls'
3a. If I instead write:
learner = load_learner('/home/jakub/.fastai/data/camvid/models/camvid-v2.pth', cpu=False)
Then the line runs, but:
learner.predict(test_img)
returns:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-11-549ed820d3d9> in <module>
----> 1 learner.predict(test_img)
AttributeError: 'dict' object has no attribute 'predict'
This is because it is of type dict.
I tried looking at @muellerzr's tutorial for deployment, but it seems that the same issue does not appear there.
Option 2
1b: If I instead use the export method, such as learner.export('camvid-test'), I have the following problem:
2b: After running:
fname = '/home/jakub/.fastai/data/camvid/camvid-test'
learn = load_learner(fname)
I get:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-23-c81f353d3f0d> in <module>
1 fname = '/home/jakub/.fastai/data/camvid/camvid-test'
----> 2 learn = load_learner(fname)
3 # res = torch.load(fname)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in load_learner(fname, cpu)
588 def load_learner(fname, cpu=True):
589 "Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
--> 590 res = torch.load(fname, map_location='cpu' if cpu else None)
591 if hasattr(res, 'to_fp32'): res = res.to_fp32()
592 if cpu: res.dls.cpu()
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
424 if sys.version_info >= (3, 0) and 'encoding' not in pickle_load_args.keys():
425 pickle_load_args['encoding'] = 'utf-8'
--> 426 return _load(f, map_location, pickle_module, **pickle_load_args)
427 finally:
428 if new_fd:
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in _load(f, map_location, pickle_module, **pickle_load_args)
611 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
612 unpickler.persistent_load = persistent_load
--> 613 result = unpickler.load()
614
615 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
AttributeError: Can't get attribute 'label_function' on <module '__main__'>
Apologies for the confusion; it took me a while to retrace all the things I tried. I will continue to investigate.
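A hedged note pieced together from the two errors above: learn.save writes a raw checkpoint dict via torch.save (hence the 'dict' object errors when its output is handed to load_learner), while load_learner expects the pickled Learner produced by learn.export. And since the export pickles a reference to the custom label_function, that function has to be defined in the loading process before calling load_learner, e.g.:

def label_function(fn):
    ...  # same definition used at training time (body elided)

learn = load_learner('/home/jakub/.fastai/data/camvid/camvid-test')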
path = 'data/en-100_tok/'
mult = 4
bs = 80
seq_len = 70
lm = DataBlock(blocks=(TextBlock(get_tokenizer(), vocab=vocab, is_lm=True, seq_len=seq_len),),
               get_x=read_tokenized_file,
               get_items=partial(get_text_files, folders=['train', 'valid']),
               splitter=splitter)
dbunch_lm = lm.databunch(path, path=path, bs=bs, seq_len=seq_len)
This worked with master from yesterday. Today (I had to change what I pass to the TextBlock initializer) I get the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-3518d3576753> in <module>
9 splitter=splitter)
10
---> 11 dbunch_lm = lm.databunch(path, path=path, bs=bs, seq_len=seq_len)
~/workspace/fastai2/fastai2/data/block.py in databunch(self, source, path, type_tfms, item_tfms, batch_tfms, **kwargs)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
---> 82 dsrc = self.datasource(source, type_tfms=type_tfms)
83 item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
84 batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
~/workspace/fastai2/fastai2/data/block.py in datasource(self, source, type_tfms)
77 type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
78 lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
---> 79 return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
~/workspace/fastai2/fastai2/data/core.py in __init__(self, items, tfms, tls, n_inp, dl_type, **kwargs)
248 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
249 super().__init__(dl_type=dl_type)
--> 250 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
251 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
252
~/workspace/fastai2/fastai2/data/core.py in <listcomp>(.0)
248 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
249 super().__init__(dl_type=dl_type)
--> 250 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
251 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
252
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/workspace/fastai2/fastai2/data/core.py in __init__(self, items, tfms, use_list, do_setup, as_item, split_idx, train_setup, splits, types)
189 self.tfms = Pipeline(tfms, as_item=as_item, split_idx=split_idx)
190 self.types = types
--> 191 if do_setup: self.setup(train_setup=train_setup)
192
193 def _new(self, items, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, **kwargs)
~/workspace/fastai2/fastai2/data/core.py in setup(self, train_setup)
202
203 def setup(self, train_setup=True):
--> 204 self.tfms.setup(self, train_setup)
205 if len(self) != 0:
206 x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
TypeError: setup() takes from 1 to 2 positional arguments but 3 were given
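A hedged guess at the cause (not a confirmed diagnosis): the Pipeline.setup signature changed around this time, so a fastai2 checkout that is out of sync with the installed fastcore can produce exactly this mismatch. Reinstalling both from git in lockstep is one way to test that:

pip install -U git+https://github.com/fastai/fastcore git+https://github.com/fastai/fastai2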
The PILBase.create method can already handle tensors, ndarrays, and bytes in addition to a filename as input, but the type annotation only allows Path and str, so the typedispatch ignores everything else and returns the input unchanged.
Is there a reason for this?
I changed it locally and it worked just fine; I'm happy to create a PR to change this.
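For reference, a sketch of what the widened annotation could look like (an assumption about the shape of the change, not a confirmed diff):

@classmethod
def create(cls, fn:(Path,str,Tensor,ndarray,bytes), **kwargs):
    # widening the typedispatch annotation so non-path inputs are no longer ignored
    ...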
Hi,
I noticed that the XSENet family has been in https://github.com/fastai/fastai2/blob/bb49c1794b82271cd61ae19287a1e7430559bb97/fastai2/vision/models/xresnet.py#L95 for quite a while.
Will they be ready to use any time soon?
First of all, fastai2 is brilliant!!
I found that the current default parameters for the ranger optimizer are:
ranger(p, lr, mom=0.95, wd=0.01, eps=1e-06, sqr_mom=0.99, beta=0.0, decouple_wd=True)
However, they are different from lessw2020 repo (https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer). They are:
class Ranger(...):
...
def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95,0.999), eps=1e-5, weight_decay=0):
...
So, sqr_mom is 0.999 instead of 0.99, eps is 1e-5, and weight_decay is 0. Maybe @lessw2020 can point out which are better.
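For comparison, a call mirroring the reference repo's defaults under fastai2's parameter names might look like this (a sketch; mapping betas[1] to sqr_mom, eps to eps, and weight_decay to wd is my reading of the two signatures):

from functools import partial
opt_func = partial(ranger, mom=0.95, sqr_mom=0.999, eps=1e-5, wd=0.)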
https://colab.research.google.com/drive/1JS3zoP_TCl3kmogQWYwM0tO2vBIrMaR9
I cannot run it after downgrading to pytorch 1.0.1 with fastai 1.0.51.
Seems to be an issue with a recent commit, when attempting to databunch COCO I receive the following stack trace:
path = untar_data(URLs.COCO_SAMPLE)
dblock = DataBlock(blocks=(ImageBlock, ImageBlock),
                   get_items=get_image_files,
                   splitter=RandomSplitter(0.1, seed=42))
dbunch = dblock.databunch(path, bs=22, item_tfms=[Resize(224)],
                          batch_tfms=[Normalize.from_stats(*imagenet_stats)])
Stack Trace:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-129-76a6dc7939cf> in <module>()
1 dbunch = dblock.databunch(path, bs=22, item_tfms=[Resize(224)],
----> 2 batch_tfms=[Normalize.from_stats(*imagenet_stats)])
6 frames
/usr/local/lib/python3.6/dist-packages/fastai2/data/block.py in databunch(self, source, path, type_tfms, item_tfms, batch_tfms, **kwargs)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
---> 82 dsrc = self.datasource(source, type_tfms=type_tfms)
83 item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
84 batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
/usr/local/lib/python3.6/dist-packages/fastai2/data/block.py in datasource(self, source, type_tfms)
77 type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
78 lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
---> 79 return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
80
81 def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in __init__(self, items, tfms, tls, n_inp, dl_type, **kwargs)
227 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
228 super().__init__(dl_type=dl_type)
--> 229 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
230 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
231
/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in <listcomp>(.0)
227 def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
228 super().__init__(dl_type=dl_type)
--> 229 self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
230 self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp
231
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in __init__(self, items, tfms, use_list, do_setup, as_item, split_idx, train_setup, splits, types)
173 self.tfms = Pipeline(tfms, as_item=as_item, split_idx=split_idx)
174 self.types = types
--> 175 if do_setup: self.setup(train_setup=train_setup)
176
177 def _new(self, items, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, **kwargs)
/usr/local/lib/python3.6/dist-packages/fastai2/data/core.py in setup(self, train_setup)
186
187 def setup(self, train_setup=True):
--> 188 self.tfms.setup(self, train_setup)
189 if len(self) != 0:
190 x,self.types = super().__getitem__(0),[]
TypeError: setup() takes from 1 to 2 positional arguments but 3 were given
Hello,
I encountered the following error when trying to train a unet using unet_learner; the error happened at epoch 7, during validation.
The problem is that in the line getattr(self.items,'iloc',self.items)[i], self.items is a length-1 list but the index i is 1, which causes this error. Any work-around?
Traceback (most recent call last):
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 280, in fit
self._do_epoch_train()
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 255, in _do_epoch_train
self.all_batches()
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 233, in all_batches
for o in enumerate(self.dl): self.one_batch(*o)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 247, in one_batch
finally: self('after_batch')
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 214, in __call__
def __call__(self, event_name): L(event_name).map(self._call_one)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 362, in map
return self._new(map(g, self))
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 315, in _new
def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 41, in __call__
res = super().__call__(*((x,) + args), **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 306, in __init__
items = list(items) if use_list else _listify(items)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 242, in _listify
if is_iter(o): return list(o)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 208, in __call__
return self.fn(*fargs, **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 217, in _call_one
[cb(event_name) for cb in sort_by_run(self.cbs)]
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 217, in <listcomp>
[cb(event_name) for cb in sort_by_run(self.cbs)]
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 20, in __call__
if self.run: getattr(self, event_name, noop)()
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/callback/tracker.py", line 26, in after_batch
if torch.isinf(self.loss) or torch.isnan(self.loss): raise CancelFitException
fastcore.utils.CancelFitException
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 209, in added_cbs
yield
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 283, in fit
finally: self('after_epoch')
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 214, in __call__
def __call__(self, event_name): L(event_name).map(self._call_one)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 362, in map
return self._new(map(g, self))
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 315, in _new
def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 41, in __call__
res = super().__call__(*((x,) + args), **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 306, in __init__
items = list(items) if use_list else _listify(items)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 242, in _listify
if is_iter(o): return list(o)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 208, in __call__
return self.fn(*fargs, **kwargs)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 217, in _call_one
[cb(event_name) for cb in sort_by_run(self.cbs)]
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 217, in <listcomp>
[cb(event_name) for cb in sort_by_run(self.cbs)]
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/learner.py", line 20, in __call__
if self.run: getattr(self, event_name, noop)()
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/callback/tracker.py", line 104, in after_epoch
super().after_epoch()
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastai2/callback/tracker.py", line 47, in after_epoch
val = self.recorder.values[-1][self.idx]
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 316, in __getitem__
def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
File "/home/wei_li/anaconda3/envs/mainpy3.7/lib/python3.7/site-packages/fastcore/foundation.py", line 320, in _get
if is_indexer(i) or isinstance(i,slice): return getattr(self.items,'iloc',self.items)[i]
IndexError: list index out of range
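From the trace, the CancelFitException raised by TerminateOnNaN appears to fire before the Recorder appends the epoch's values, so self.recorder.values[-1] indexes an empty list. A hedged sketch of a guard in the tracker's after_epoch (a workaround idea, not the upstream fix):

def after_epoch(self):
    if not self.recorder.values: return  # nothing recorded: fit was cancelled mid-epoch
    val = self.recorder.values[-1][self.idx]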
Hi, after installing fastai2 through pip, I get the following error when trying to call from fastai2.callback.all import *:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-74228919d9ab> in <module>()
1 from fastai2.basics import *
2 from fastai2.tabular.all import *
----> 3 from fastai2.callback.all import *
2 frames
/usr/local/lib/python3.6/dist-packages/fastai2/vision/core.py in <module>()
19 #Cell
20 if not hasattr(Image,'_patched'):
---> 21 _old_sz = Image.Image.size.fget
22 @patch_property
23 def size(x:Image.Image): return Tuple(_old_sz(x))
AttributeError: type object 'Image' has no attribute 'size'
Could this problem be caused by a specific version of PIL or the patching logic itself?
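A hedged diagnostic (assumption: the patch requires Image.Image.size to be a class-level property, which not every Pillow version provides):

import PIL
from PIL import Image
print(getattr(PIL, '__version__', 'unknown'))
print(isinstance(getattr(Image.Image, 'size', None), property))  # the patch assumes this prints True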
I create an LM as follows:
WD=1e-7
def opt(params, lr):
return Adam(params, lr, mom=0.8, sqr_mom=0.99)
drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15])
config = dict(
    emb_sz=100,
    n_hid=1024,
    n_layers=3,
    input_p=drops[0],
    hidden_p=drops[1],
    weight_p=drops[2],
    embed_p=drops[3])
awd_lstm_lm_config.update(config)
learn = language_model_learner(
    dbunch_lm,
    AWD_LSTM,
    opt_func=opt,
    pretrained=False,
    config=awd_lstm_lm_config,
    drop_mult=0.2,
    metrics=[accuracy, Perplexity()]
)
and get an error when calling learn.get_preds().
SaveModelCallback has a call on 'on_train_end'. I couldn't find such an event anywhere in the code, and couldn't catch a breakpoint on it. Should it be replaced with 'after_fit'?
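For context, 'on_train_end' is a fastai v1 event name. A hedged sketch of the v1 -> v2 correspondence as I understand it (worth double-checking against the callback docs):

# fastai v1 -> fastai2 callback events (my mapping)
# on_train_begin -> begin_fit
# on_epoch_end   -> after_epoch
# on_train_end   -> after_fit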
I was occasionally facing the following bug when feeding forward my model (e.g. while in lr_find):
/usr/local/lib/python3.6/dist-packages/fastai2/layers.py in forward(self, x)
401 "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
402 def __init__(self, dense:bool=False): self.dense=dense
--> 403 def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)
404
405 #Cell
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 1. Got 256 and 640 in dimension 2
I decided to investigate and traced back the error to the following (which can be found under unet_learner):
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
What happens is that I'm applying a resize when creating my databunch, like so:
dbch = dblock.databunch(path, bs=bs, item_tfms=[Resize(sz)]...
But dbunch.train_ds[0][0].size gets the original size of the image, not the transformed one, and this causes the error. (If instead dbunch.one_batch()[0].shape[-2:] executes, everything works correctly.)
I decided to post this as a bug instead of a PR because the try/except must be there for a reason. For me, keeping only the except case fixed the issue, but that might cause some unexpected damage.
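For what it's worth, a hedged sketch of the reordering that worked for me locally (preferring the post-transform batch shape; this may well break whatever case the original ordering was protecting):

try: size = dbunch.one_batch()[0].shape[-2:]
except: size = dbunch.train_ds[0][0].size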
Calling torch.distributed.all_reduce() is needed in order to obtain the sum of all tensors (loss, top_accuracy, ...) across all processes when running in distributed training.
I saw that you already wrote a function that does exactly that, _maybe_reduce(val), but I'm not sure whether it is used anywhere in the distributed callback or elsewhere in the training pipeline.
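For reference, a minimal sketch of the reduction I mean, using plain torch.distributed (standard PyTorch API, not fastai2-specific):

import torch.distributed as dist

def reduce_mean(t):
    "Average a scalar tensor across all DDP processes"
    t = t.detach().clone()
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    return t / dist.get_world_size()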
In the lesson 1 notebook there is an issue with the RegexLabeller missing some files. This is because some of the images in the dataset are .png files. My proposed fix is to change the expression to:
r'/([^/]+)_\d+.*'
I'm prepping my own notebooks right now so I don't have time to fix this myself until a later date (but I can if it is not fixed by then :) )
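For clarity, the proposed pattern plugged into the notebook's labelling call would look like this (a sketch; get_y is where lesson 1 uses the labeller):

get_y = RegexLabeller(pat=r'/([^/]+)_\d+.*')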
If we use a callback that has self.run set to False, it will never run again when starting a new loop.
Example:
learn = cnn_learner(…, cbs=WandbCallback(log_preds=False))
learn.fit(1) # callback runs
# either of the two lines below prevents the cb from running in the next "fit" loop
learn.show_results()
learn.lr_find()
learn.fit(1) # callback does not run
Callbacks use self.run to decide whether they run or not, and it is typically set back to True in after_fit. However, after_fit does not execute with certain methods such as show_results and lr_find.
Proposed solution: begin_fit should always execute, even if self.run==False, to re-evaluate self.run.
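A hedged sketch of what that could look like in Callback.__call__ (my wording of the proposal, not a tested patch):

def __call__(self, event_name):
    # always dispatch begin_fit so a callback can re-evaluate self.run for the new loop
    if self.run or event_name == 'begin_fit':
        getattr(self, event_name, noop)()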
Hey,
After pulling the latest commit, it appears that distributed training stopped working. We are getting this error:
Expected object of backend CUDA but got backend CPU for argument #2 'target' in call to _thnn_nll_loss_forward
We checked the device type of self.pred in after_pred(), and it appears that self.pred is on the cpu and not on cuda.
The latest commit where distributed training worked is from around January 13th.
Maybe it's related to this commit: 2c94766
I have been stuck on this for several days, so I thought I'd post it here.
I find there is something odd with the memory management after finishing an epoch. I tried to run the fastai U-Net on the Synthia dataset, and this is the error I get despite having a batch size of 1 with a crop to 360 on an 11GB K80. I must be missing something, because camvid runs fine. (It also does not seem to happen if I only load 10 examples into the dataset.)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-18-637f3e7802b9> in <module>()
----> 1 learn.fit_one_cycle(10,slice(1e-6,1e-3), cbs=WandbCallback())
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
88 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
89 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
---> 90 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
91
92 # Cell
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
290 self._do_epoch_validate()
291 except CancelEpochException: self('after_cancel_epoch')
--> 292 finally: self('after_epoch')
293
294 except CancelFitException: self('after_cancel_fit')
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
221 def ordered_cbs(self, cb_func:str): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
222
--> 223 def __call__(self, event_name): L(event_name).map(self._call_one)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
360 else f.format if isinstance(f,str)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
364 def filter(self, f, negate=False, **kwargs):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
313 @property
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
304 if items is None: items = []
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
308 if is_coll(match): match = len(match)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in _listify(o)
240 if isinstance(o, list): return o
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
244
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastcore/foundation.py in __call__(self, *args, **kwargs)
206 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
210 # Cell
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in _call_one(self, event_name)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
228 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in <listcomp>(.0)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
228 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
23 _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
24 (self.run_valid and not getattr(self, 'training', False)))
---> 25 if self.run and _run: getattr(self, event_name, noop)()
26
27 @property
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/callback/wandb.py in after_epoch(self)
64 if self.log_preds:
65 b = self.valid_dl.one_batch()
---> 66 self.learn.one_batch(0, b)
67 preds = getattr(self.loss_func, 'activation', noop)(self.pred)
68 out = getattr(self.loss_func, 'decodes', noop)(preds)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/learner.py in one_batch(self, i, b)
246 try:
247 self._split(b); self('begin_batch')
--> 248 self.pred = self.model(*self.xb); self('after_pred')
249 if len(self.yb) == 0: return
250 self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
--> 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/layers.py in forward(self, x)
415 for l in self.layers:
416 res.orig = x
--> 417 nres = l(res)
418 # We have to remove res.orig to avoid hanging refs and therefore memory leaks
419 res.orig = None
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
--> 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai2/vision/models/unet.py in forward(self, up_in)
38 if ssh != up_out.shape[-2:]:
39 up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
---> 40 cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
41 return self.conv2(self.conv1(cat_x))
42
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
545 result = self._slow_forward(*input, **kwargs)
546 else:
--> 547 result = self.forward(*input, **kwargs)
548 for hook in self._forward_hooks.values():
549 hook_result = hook(self, input, result)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/activation.py in forward(self, input)
92
93 def forward(self, input):
---> 94 return F.relu(input, inplace=self.inplace)
95
96 def extra_repr(self):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/functional.py in relu(input, inplace)
911 result = torch.relu_(input)
912 else:
--> 913 result = torch.relu(input)
914 return result
915
RuntimeError: CUDA out of memory. Tried to allocate 508.00 MiB (GPU 0; 11.17 GiB total capacity; 10.43 GiB already allocated; 4.81 MiB free; 419.63 MiB cached)
I was so far unable to replicate this on Camvid, but I will keep trying. The odd thing is that on Camvid (essentially the same dataset), with bs=8 and a much larger image size, the memory used is ~50%.
Further interesting aspects:
- Something seems off with the inferred n_out, but theoretically the dataset should have 15 classes.
- Without WandbCallback, the thing seems to run.

I started a fresh install of fastai2 and got the following error when installing with pip:
(fastai2) paperspace@ps4y32elp:~/fastai2$ pip install git+https://github.com/fastai/fastai2
Collecting git+https://github.com/fastai/fastai2
Cloning https://github.com/fastai/fastai2 to /tmp/pip-req-build-osgksw0h
Running command git clone -q https://github.com/fastai/fastai2 /tmp/pip-req-build-osgksw0h
ERROR: Command errored out with exit status 1:
command: /home/paperspace/anaconda3/envs/fastai2/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-req-build-osgksw0h/setup.py'"'"'; __file__='"'"'/tmp/pip-req-build-osgksw0h/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-req-build-osgksw0h/pip-egg-info
cwd: /tmp/pip-req-build-osgksw0h/
Complete output (5 lines):
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-req-build-osgksw0h/setup.py", line 1, in <module>
from packaging.version import parse
ModuleNotFoundError: No module named 'packaging'
----------------------------------------
ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
This was solved by running pip install packaging and re-running the pip install command for fastai2, but I wasn't sure where upstream this fix could/should be made. Just wanted to let you all know.
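In other words, the workaround was simply:

pip install packaging
pip install git+https://github.com/fastai/fastai2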
I know you're in the middle of refactoring, but I'm getting the following error:
~/projects/fastai2/fastai2/imports.py in <module>
27 from scipy import ndimage
28 from IPython.core.debugger import set_trace
---> 29 from fastcore.all import *
30 from fastprogress.fastprogress import progress_bar, master_bar
31
ModuleNotFoundError: No module named 'fastcore.all'
I forked the fastcore repo to attempt to solve it locally, but I'm unsure how to configure the nbdev settings to build the all.py file found in many of the fastai modules.
Is there something I can do locally to resolve this for now?
fastai2 version: 0.0.8 (installed from 0ea6d11)
fastcore version: 0.1.11 (installed from bb49eea)
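A hedged thought (an assumption, not a confirmed fix): fastcore.all may simply be absent from the installed fastcore build, in which case reinstalling the current git version alongside fastai2 could resolve it without touching any nbdev settings:

pip install -U git+https://github.com/fastai/fastcore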
I'm trying to use the latest version of fastai2 and have installed fastcore and fastai2 from their respective git repositories. I'm following this guide: http://dev.fast.ai/tutorial.transfer_learning
After creating a databunch I try to display it with dbunch.show_batch(max_n=9), but I get an error about mismatched dimensions. Typically I've seen this error when I've forgotten to resize the images to the same size, but I would have thought one of the transforms, RandomResizedCrop or aug_transforms(size=299, max_warp=0), would have accounted for this.
Full stack trace:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-9647f9a9f88a> in <module>
----> 1 dbunch.show_batch(max_n=9)
~/git/fastai2/fastai2/data/core.py in show_batch(self, b, max_n, ctxs, show, **kwargs)
88
89 def show_batch(self, b=None, max_n=9, ctxs=None, show=True, **kwargs):
---> 90 if b is None: b = self.one_batch()
91 if not show: return self._pre_show_batch(b, max_n=max_n)
92 show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
~/git/fastai2/fastai2/data/load.py in one_batch(self)
128 def one_batch(self):
129 if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
--> 130 with self.fake_l.no_multiproc(): res = first(self)
131 if hasattr(self, 'it'): delattr(self, 'it')
132 return res
~/git/fastcore/fastcore/utils.py in first(x)
172 def first(x):
173 "First element of `x`, or None if missing"
--> 174 try: return next(iter(x))
175 except StopIteration: return None
176
~/git/fastai2/fastai2/data/load.py in __iter__(self)
95 self.randomize()
96 self.before_iter()
---> 97 for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
98 if self.device is not None: b = to_device(b, self.device)
99 yield self.after_batch(b)
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
344 def __next__(self):
345 index = self._next_index() # may raise StopIteration
--> 346 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
347 if self._pin_memory:
348 data = _utils.pin_memory.pin_memory(data)
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
32 raise StopIteration
33 else:
---> 34 data = next(self.dataset_iter)
35 return self.collate_fn(data)
36
~/git/fastai2/fastai2/data/load.py in create_batches(self, samps)
104 self.it = iter(self.dataset) if self.dataset is not None else None
105 res = filter(lambda o:o is not None, map(self.do_item, samps))
--> 106 yield from map(self.do_batch, self.chunkify(res))
107
108 def new(self, dataset=None, cls=None, **kwargs):
~/git/fastai2/fastai2/data/load.py in do_batch(self, b)
125 def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
126 def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
--> 127 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
128 def one_batch(self):
129 if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
~/git/fastai2/fastai2/data/load.py in create_batch(self, b)
124 def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
125 def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
--> 126 def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
127 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
128 def one_batch(self):
~/git/fastai2/fastai2/data/load.py in fa_collate(t)
44 b = t[0]
45 return (default_collate(t) if isinstance(b, _collate_types)
---> 46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
47 else default_collate(t))
48
~/git/fastai2/fastai2/data/load.py in <listcomp>(.0)
44 b = t[0]
45 return (default_collate(t) if isinstance(b, _collate_types)
---> 46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
47 else default_collate(t))
48
~/git/fastai2/fastai2/data/load.py in fa_collate(t)
43 def fa_collate(t):
44 b = t[0]
---> 45 return (default_collate(t) if isinstance(b, _collate_types)
46 else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
47 else default_collate(t))
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py in default_collate(batch)
53 storage = elem.storage()._new_shared(numel)
54 out = elem.new(storage)
---> 55 return torch.stack(batch, 0, out=out)
56 elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
57 and elem_type.__name__ != 'string_':
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 500 and 333 in dimension 2 at /opt/conda/conda-bld/pytorch_1573049306803/work/aten/src/TH/generic/THTensor.cpp:689
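A hedged reading of the trace: tensors of sizes 500 and 333 reached default_collate, which suggests the per-item resize never ran. If RandomResizedCrop ended up among the batch transforms (or was dropped), moving it into item_tfms should fix the collation, e.g. (a sketch with assumed variable names):

dbunch = dblock.databunch(source, item_tfms=RandomResizedCrop(299),
                          batch_tfms=aug_transforms(max_warp=0))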
When I run verify_images like so:
path_b = Path('birds/blue_bird')
verify_images(path_b.ls())
It will show that there is a bad image at 'birds/blue_bird/00000047.jpg'; however, it never winds up deleting said image.
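For reference, my understanding (hedged) is that fastai2's verify_images returns the failing paths rather than deleting them, so the deletion is an explicit second step:

failed = verify_images(path_b.ls())
failed.map(Path.unlink)  # delete the images that failed verification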
I am trying to launch a distributed run via the 'launch' script:
python -m fastai2.launch train.py --model_name=xresnet
On a 1xV100 machine, the run succeeds.
On a 2xV100 machine, the run fails and I get an error message:
File "/opt/conda/lib/python3.6/site-packages/fastai2/distributed.py", line 91, in begin_fit
self.learn.model = DistributedDataParallel(self.model, device_ids=[self.cuda_id], output_device=self.cuda_id)
File "/opt/conda/lib/python3.6/site-packages/torch/nn/parallel/distributed.py", line 298, in init
self.broadcast_bucket_size)
File "/opt/conda/lib/python3.6/site-packages/torch/nn/parallel/distributed.py", line 480, in _distributed_broadcast_coalesced
dist._broadcast_coalesced(self.process_group, tensors, buffer_size)
RuntimeError: Broken pipe
I pulled the latest version of fastai2 on 25.11.
Hi,
adding the Tensorboard callback isn't supported when running in DDP; I'm getting this error:
self.writer = SummaryWriter(log_dir=self.log_dir)
File "/opt/conda/lib/python3.6/site-packages/torch/utils/tensorboard/writer.py", line 221, in __init__
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: 'runs/Dec01_17-51-15_job-28573-chief-0'
I assume it should be a similar solution to how it was handled in SaveModelCallback.
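A hedged sketch of the kind of guard I mean (rank_distrib is the helper I believe fastai2 uses elsewhere for this; worth verifying):

from torch.utils.tensorboard import SummaryWriter

def begin_fit(self):
    # only the master process should create the log directory and writer
    if rank_distrib() == 0:
        self.writer = SummaryWriter(log_dir=self.log_dir)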
Notebook 21 does not run completely and fails at this line:
learn = unet_learner(dls, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1))
Here is the error
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-31-bd945bd2ef1c> in <module>
----> 1 learn = unet_learner(dls, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1))
2 learn = unet_learner(dls, models.resnet34, pretrained=True, n_in=4)
<ipython-input-28-429ea9b9ca5e> in unet_learner(dls, arch, loss_func, pretrained, cut, splitter, config, n_in, n_out, **kwargs)
11 model = models.unet.DynamicUnet(body, n_out, size, **config)
12 learn = Learner(dls, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
---> 13 if pretrained: learn.freeze()
14 return learn
~/Projects/fastai2/fastai2/learner.py in freeze(self)
564
565 @patch
--> 566 def freeze(self:Learner): self.freeze_to(-1)
567
568 @patch
~/Projects/fastai2/fastai2/learner.py in freeze_to(self, n)
559 @patch
560 def freeze_to(self:Learner, n):
--> 561 if self.opt is None: self.create_opt()
562 self.opt.freeze_to(n)
563 self.opt.clear_state()
~/Projects/fastai2/fastai2/learner.py in create_opt(self)
233 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
234 def create_opt(self):
--> 235 self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
236 if not self.wd_bn_bias:
237 for p in self._bn_bias_state(True ): p['do_wd'] = False
<ipython-input-23-75cf9ffd62a9> in _resnet_split(m)
1 #export
2 def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
----> 3 def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
4 def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
5 def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
~/Projects/fastcore/fastcore/foundation.py in map(self, f, *args, **kwargs)
360 else f.format if isinstance(f,str)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
364 def filter(self, f, negate=False, **kwargs):
~/Projects/fastcore/fastcore/foundation.py in _new(self, items, *args, **kwargs)
313 @property
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
~/Projects/fastcore/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/Projects/fastcore/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
304 if items is None: items = []
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
308 if is_coll(match): match = len(match)
~/Projects/fastcore/fastcore/foundation.py in _listify(o)
240 if isinstance(o, list): return o
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
244
~/Projects/fastcore/fastcore/foundation.py in __call__(self, *args, **kwargs)
206 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
210 # Cell
~/Projects/fastai2/fastai2/torch_core.py in params(m)
496 def params(m):
497 "Return all parameters of `m`"
--> 498 return [p for p in m.parameters()]
499
500 # Cell
AttributeError: 'tuple' object has no attribute 'parameters'
It runs if we use pretrained=False, but it will fail at the same step when we do learn.fit(1).
Looks like there are 4 layers in the head:
https://github.com/fastai/fastai2/blob/master/fastai2/vision/models/xresnet.py#L41
However, the cut says -4:
https://github.com/fastai/fastai2/blob/master/fastai2/vision/learner.py#L86
Test case:
test_eq(bool(_is_pool_type(models.xresnet.xresnet34()[_xresnet_meta['cut']])), True)
When trying to use U-Net with odd-sized images, this error is thrown:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-25-325776f19633> in <module>
1 timg = test_pipe('tests/stata.jpg').cuda()
----> 2 pred = TensorImage(learn.model(timg)).cpu()
~/anaconda3/envs/dl2/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~/forks/fastai2/fastai2/layers.py in forward(self, x)
386 for l in self.layers:
387 res.orig = x
--> 388 nres = l(res)
389 # We have to remove res.orig to avoid hanging refs and therefore memory leaks
390 res.orig = None
~/anaconda3/envs/dl2/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~/forks/fastai2/fastai2/layers.py in forward(self, x)
401 "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
402 def __init__(self, dense:bool=False): self.dense=dense
--> 403 def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)
404
405 #Cell
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 1. Got 679 and 680 in dimension 2 at /opt/conda/conda-bld/pytorch_1573049306803/work/aten/src/THC/generic/THCTensorMath.cu:71
And checking the shapes this is what we have:
ipdb> x.shape
torch.Size([1, 64, 680, 1024])
ipdb> x.orig.shape
torch.Size([1, 3, 679, 1024])
So what I'm guessing here is that the padding on the conv layers of the network is increasing the image dimensions by 1; when the image gets reconstructed by the decoder, it always gets rounded up to the next even number.
A solution would be to add asymmetrical padding throughout the network when the image is odd, but I imagine that would get really messy really fast. Another solution would be to always pad (or resize) the image at the beginning of the network to make sure its dimensions are always even.
I would like to work on this issue, and some input on what would be the best strategy would be very helpful =)
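As a starting point for discussion, a hedged sketch of the second option (padding inputs up to even sizes before the network; purely my sketch, not a vetted fix):

import torch.nn.functional as F

def pad_to_even(x):
    "Pad the last two dims on the right/bottom so height and width are even"
    h, w = x.shape[-2:]
    return F.pad(x, (0, w % 2, 0, h % 2))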
When trying to initialize xresnet50 with pretrained=True, no error appears but performance is terrible.
The reason is that there were some renamings ("convs" -> "convpath", "idconv" -> "idpath", etc.), so the keys in the state dicts don't match anymore. Because strict=False in the _xresnet() function, no error is thrown.
I fixed the names and uploaded the updated state_dict here. This version does not include the optimizer state, but I don't think that's important.
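For anyone hitting this before updated weights land, a hedged sketch of remapping the old keys in memory (the rename pairs are the ones listed above; the helper itself is hypothetical):

def remap_keys(old_sd):
    "Rename pre-refactor state_dict keys to the current module names"
    renames = [('convs', 'convpath'), ('idconv', 'idpath')]
    def fix(k):
        for old, new in renames: k = k.replace(old, new)
        return k
    return {fix(k): v for k, v in old_sd.items()}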
Hello,
I installed the latest fastai2 and get the following error:
File "python3.7/site-packages/fastai2/layers.py", line 35, in c
__repr__ = basic_repr(all_flds)
NameError: name 'basic_repr' is not defined
Is there a currently supported way of specifying a sampler to a data loader?
In https://github.com/fastai/fastai2/blame/c7e1c48c448e03491a34bf143cb3c5c969219781/nbs/examples/train_wt2.py#L32 it passes `sampler` to `TfmdDL`, but this appears to be ignored entirely (at least in current master).
Or is the approach to achieving this different in fastai2? In my case I want to balance a class-imbalanced dataset via the sampler (see the plain-PyTorch sketch below).
Thanks!
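In the meantime, here is a plain-PyTorch sketch of the intended balancing (not a fastai2 answer), using torch's WeightedRandomSampler:
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

labels = torch.tensor([0, 0, 0, 0, 1])                    # toy unbalanced labels
weights = (1.0 / torch.bincount(labels).float())[labels]  # rarer class -> higher weight
sampler = WeightedRandomSampler(weights, num_samples=len(labels))
dl = DataLoader(list(zip(range(len(labels)), labels.tolist())), sampler=sampler, batch_size=2)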
I updated fastai2 and fastcore to the latest version (9/1/2020) and got the following error:
File "...\fastcore\foundation.py", line 41, in __call__ res = super().__call__(*((x,) + args), **kwargs) File "...\fastai2\data\core.py", line 175, in __init__ if do_setup: self.setup(train_setup=train_setup) File "...fastai2\data\core.py", line 188, in setup self.tfms.setup(self, train_setup) TypeError: setup() takes from 1 to 2 positional arguments but 3 were given
nbdev is missing from the env.yml.
There was a change introduced in pillow==7.0.0 that is conflicting with torchvision. The breaking change is documented in the pillow project's changelog. The issue it creates with various other libraries that depend on pillow can be found in a GitHub Issue in the torchvision repo and also in a GitHub Issue in the pillow repo.
Package managers like poetry or pipenv rely on the upstream projects to define which versions of the dependencies they are compatible with. In the environment.yml we currently don't set any particular limit on which version of pillow we can install, which means that the package manager will try to install the most recent one.
The best way I've found to prevent the problem is to downgrade pillow to the most recent version before 7.0.0. This works in my local development environment, but I think it would be great if we could pin it in the main project as well (see the sketch below), so that we don't have to do it manually every time.
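A hedged illustration of what the pin could look like in environment.yml (the surrounding section layout is assumed):
dependencies:
  - pillow<7.0.0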
I think this is a function of the latest changes (which I generally massively support), but just thought I'd point out this issue. I will investigate it tomorrow to see if I can fix it.
On the line in the camvid notebook:
learn.fit_one_cycle(10, slice(lr), pct_start=0.9, wd=1e-2)
If we add (including the appropriate imports & setup):
learn.fit_one_cycle(10, slice(lr), pct_start=0.9, wd=1e-2, cbs=WandbCallback())
It raises this issue:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
284 try:
--> 285 self._do_begin_fit(n_epoch)
286 for epoch in range(n_epoch):
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in _do_begin_fit(self, n_epoch)
258 def _do_begin_fit(self, n_epoch):
--> 259 self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit')
260
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
222
--> 223 def __call__(self, event_name): L(event_name).map(self._call_one)
224 def _call_one(self, event_name):
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _listify(o)
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(self, *args, **kwargs)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in _call_one(self, event_name)
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in <listcomp>(.0)
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
24 (self.run_valid and not getattr(self, 'training', False)))
---> 25 if self.run and _run: getattr(self, event_name, noop)()
26
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/callback/wandb.py in begin_fit(self)
47 test_tls = [tl._new(items, split_idx=1) for tl in self.dls.valid_ds.tls]
---> 48 self.valid_dl = self.dls.valid_dl.new(Datasets(tls=test_tls), bs=self.n_preds)
49
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __getattr__(self, k)
222 if attr is not None: return getattr(attr, k)
--> 223 raise AttributeError(k)
224 def __dir__(self): return custom_dir(self, self._dir() if self._xtra is None else self._dir())
AttributeError: valid_dl
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-19-cadf93af9e73> in <module>
----> 1 learn.fit_one_cycle(10, slice(lr), pct_start=0.9, wd=1e-2, cbs=WandbCallback())
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
88 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
89 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
---> 90 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
91
92 # Cell
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
293
294 except CancelFitException: self('after_cancel_fit')
--> 295 finally: self('after_fit')
296
297 def validate(self, ds_idx=1, dl=None, cbs=None):
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
221 def ordered_cbs(self, cb_func:str): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
222
--> 223 def __call__(self, event_name): L(event_name).map(self._call_one)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
360 else f.format if isinstance(f,str)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
364 def filter(self, f, negate=False, **kwargs):
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
313 @property
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
304 if items is None: items = []
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
308 if is_coll(match): match = len(match)
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in _listify(o)
240 if isinstance(o, list): return o
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
244
~/daisy-gan/venv/lib/python3.6/site-packages/fastcore/foundation.py in __call__(self, *args, **kwargs)
206 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
210 # Cell
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in _call_one(self, event_name)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
228 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in <listcomp>(.0)
224 def _call_one(self, event_name):
225 assert hasattr(event, event_name)
--> 226 [cb(event_name) for cb in sort_by_run(self.cbs)]
227
228 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in __call__(self, event_name)
23 _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
24 (self.run_valid and not getattr(self, 'training', False)))
---> 25 if self.run and _run: getattr(self, event_name, noop)()
26
27 @property
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/callback/progress.py in after_fit(self)
37 def after_fit(self):
38 if getattr(self, 'mbar', False):
---> 39 self.mbar.on_iter_end()
40 delattr(self, 'mbar')
41 self.learn.logger = self.old_logger
~/daisy-gan/venv/lib/python3.6/site-packages/fastprogress/fastprogress.py in on_iter_end(self)
155 total_time = format_time(time.time() - self.main_bar.start_t)
156 self.text = f'Total time: {total_time} <p>' + self.text
--> 157 self.out.update(HTML(self.text))
158
159 def add_child(self, child):
AttributeError: 'NBMasterBar' object has no attribute 'out'
Calling databunch() on a Datasource leads to an IndexError.
I did a fresh install from github of fastai2 and fastcore.
It does not matter what kind of iterable the Datasource contains.
from fastai2.basics import *
DataSource([0]*1000).databunch()
leads to the following error
IndexError Traceback (most recent call last)
in
1 from fastai2.basics import *
----> 2 DataSource([0.]*1000).databunch()
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in databunch(self, bs, val_bs, shuffle_train, n, path, dl_type, dl_kwargs, **kwargs)
149 if dl_type is None: dl_type = self._dl_type
150 dls = [dl_type(self.subset(i), bs=b, shuffle=s, drop_last=s, n=n if i==0 else None, **kwargs, **dk)
--> 151 for i,(b,s,dk) in enumerate(zip(bss,shuffles,dl_kwargs))]
152 return DataBunch(*dls, path=path)
153
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in (.0)
149 if dl_type is None: dl_type = self._dl_type
150 dls = [dl_type(self.subset(i), bs=b, shuffle=s, drop_last=s, n=n if i==0 else None, **kwargs, **dk)
--> 151 for i,(b,s,dk) in enumerate(zip(bss,shuffles,dl_kwargs))]
152 return DataBunch(*dls, path=path)
153
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in __init__(self, dataset, bs, shuffle, num_workers, **kwargs)
39 super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
40 for nm in _batch_tfms: kwargs[nm].setup(self)
---> 41 if self.device is None: self._one_pass()
42
43 def _one_pass(self):
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in _one_pass(self)
42
43 def _one_pass(self):
---> 44 its = self.after_batch(self.do_batch([self.do_item(0)]))
45 if self.device is None: self.device = find_device(its)
46 self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
~/miniconda3/lib/python3.7/site-packages/fastai2/data/load.py in do_item(self, s)
117 def prebatched(self): return self.bs is None
118 def do_item(self, s):
--> 119 try: return self.after_item(self.create_item(s))
120 except SkipItemException: return None
121 def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
~/miniconda3/lib/python3.7/site-packages/fastai2/data/load.py in create_item(self, s)
123 def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
124 def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
--> 125 def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
126 def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
127 def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in __getitem__(self, it)
203
204 def __getitem__(self, it):
--> 205 res = tuple([tl[it] for tl in self.tls])
206 return res if is_indexer(it) else list(zip(*res))
207
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in (.0)
203
204 def __getitem__(self, it):
--> 205 res = tuple([tl[it] for tl in self.tls])
206 return res if is_indexer(it) else list(zip(*res))
207
~/miniconda3/lib/python3.7/site-packages/fastai2/data/core.py in __getitem__(self, idx)
178
179 def __getitem__(self, idx):
--> 180 res = super().__getitem__(idx)
181 if self._after_item is None: return res
182 return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
~/miniconda3/lib/python3.7/site-packages/fastcore/foundation.py in __getitem__(self, idx)
314 def _xtra(self): return None
315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
--> 316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
318
~/miniconda3/lib/python3.7/site-packages/fastcore/foundation.py in _get(self, i)
318
319 def _get(self, i):
--> 320 if is_indexer(i) or isinstance(i,slice): return getattr(self.items,'iloc',self.items)[i]
321 i = mask2idxs(i)
322 return (self.items.iloc[list(i)] if hasattr(self.items,'iloc')
IndexError: list index out of range
In the notebook migrating.ipynb, under the section 'Use the fastai training loop', Adam is used in the Learner declaration without being imported from fastai2.optimizer.
Current:
data = DataBunch(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
To be changed to:
from fastai2.optimizer import Adam
data = DataBunch(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
If we call `learn.summary()` on a model that has a `ParameterModule`, the following error is raised:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-39-bc39e9e85f86> in <module>
----> 1 learn.summary()
~/forks/fastai2/fastai2/callback/hook.py in summary(self)
170 res += "=" * n + "\n"
171 ps,trn_ps = 0,0
--> 172 for typ,np,trn,sz in infos:
173 if sz is None: continue
174 ps += np
TypeError: cannot unpack non-iterable NoneType object
A very simple example is:
m = ResBlock(1, 3, 32, sa=True)
learn = Learner(dbch, m)
learn.summary()
Where `SimpleSelfAttention` uses an `nn.Parameter` (`self.gamma`).
My first intuition was to go to `layer_info` and do:
layers = [m for m in flatten_model(learn.model) if not isinstance(m, ParameterModule)]
But that is wrong, because we do want the parameter to show in the summary.
In reality I don't understand exactly why this error is thrown.
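A guess at the mechanism (an assumption, not verified against the fastai2 source): layer_info gathers its rows from forward hooks, and a module that only holds a bare nn.Parameter is never called during forward, so its row stays None and the unpacking at line 172 fails. A minimal plain-PyTorch illustration of a hook that never fires:
import torch, torch.nn as nn

class ParamOnly(nn.Module):
    "Holds a parameter but is never invoked as a callable during forward."
    def __init__(self):
        super().__init__()
        self.gamma = nn.Parameter(torch.zeros(1))

rows = {}
def make_hook(name):
    def hook(mod, inp, out): rows[name] = tuple(out.shape)
    return hook

lin, p = nn.Linear(4, 4), ParamOnly()
lin.register_forward_hook(make_hook('lin'))
p.register_forward_hook(make_hook('p'))
lin(torch.randn(2, 4))   # only the Linear actually runs
print(rows)              # {'lin': (2, 4)} -- nothing was recorded for 'p'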
Bumped into the following issue when installing fastai2 on an Nvidia Jetson Nano, which is on ARM:
Collecting fastprogress>=0.1.22
Downloading fastprogress-0.2.2-py3-none-any.whl (12 kB)
ERROR: Could not find a version that satisfies the requirement torch<1.4.0,>=1.2.0 (from fastai2) (from versions: 0.1.2, 0.1.2.post1, 0.1.2.post2)
ERROR: No matching distribution found for torch<1.4.0,>=1.2.0 (from fastai2)
Would it be possible to implement/show an example/tutorial of how to implement a 4-channel image input or multiple inputs (image + image, image + text) for tasks like image segmentation/classification (see the sketch below for one starting point)? I know that some of these tasks will eventually be implemented by the community at some point (6 months after the release of a stable version), like it happened for fastai v1. But it would boost fastai adoption if a guide for this were present from the beginning (personally, I always fall back to the keras functional API when I have to manage special inputs, etc., and I always regret what I lose when doing that).
So, would that be possible? I'm willing to contribute if you share some direction (I understand that you are probably very busy).
Thank you!
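Not an official fastai2 recipe, but as a hedged plain-PyTorch starting point for the 4-channel case, one common approach is to widen a pretrained backbone's first conv and reuse its RGB weights:
import torch
import torch.nn as nn
from torchvision.models import resnet34  # stand-in backbone for illustration

model = resnet34(pretrained=True)
old = model.conv1                                   # Conv2d(3, 64, ...)
new = nn.Conv2d(4, old.out_channels, kernel_size=old.kernel_size,
                stride=old.stride, padding=old.padding, bias=False)
with torch.no_grad():
    new.weight[:, :3] = old.weight                        # keep the pretrained RGB filters
    new.weight[:, 3:] = old.weight.mean(1, keepdim=True)  # init the extra channel from their mean
model.conv1 = new                                   # the model now expects 4-channel input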
Leaving this as a GitHub issue so it can be looked at later (not a major priority) when the book is done :)
dl = learn.dls.test_dl(imgs[:10])
preds = learn.get_preds(dl=dl)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-65-9a993e47d6c2> in <module>()
----> 1 preds = learn.get_preds(dl=dl)
12 frames
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in get_preds(self, ds_idx, dl, with_input, with_decoded, with_loss, act, **kwargs)
319 self(_before_epoch)
320 self._do_epoch_validate(dl=dl)
--> 321 self(_after_epoch)
322 if act is None: act = getattr(self.loss_func, 'activation', noop)
323 res = cb.all_tensors()
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in __call__(self, event_name)
226 def ordered_cbs(self, cb_func:str): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
227
--> 228 def __call__(self, event_name): L(event_name).map(self._call_one)
229 def _call_one(self, event_name):
230 assert hasattr(event, event_name)
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in map(self, f, *args, **kwargs)
360 else f.format if isinstance(f,str)
361 else f.__getitem__)
--> 362 return self._new(map(g, self))
363
364 def filter(self, f, negate=False, **kwargs):
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in _new(self, items, *args, **kwargs)
313 @property
314 def _xtra(self): return None
--> 315 def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs)
316 def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None)
317 def copy(self): return self._new(self.items.copy())
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in __call__(cls, x, *args, **kwargs)
39 return x
40
---> 41 res = super().__call__(*((x,) + args), **kwargs)
42 res._newchk = 0
43 return res
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in __init__(self, items, use_list, match, *rest)
304 if items is None: items = []
305 if (use_list is not None) or not _is_array(items):
--> 306 items = list(items) if use_list else _listify(items)
307 if match is not None:
308 if is_coll(match): match = len(match)
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in _listify(o)
240 if isinstance(o, list): return o
241 if isinstance(o, str) or _is_array(o): return [o]
--> 242 if is_iter(o): return list(o)
243 return [o]
244
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in __call__(self, *args, **kwargs)
206 if isinstance(v,_Arg): kwargs[k] = args.pop(v.i)
207 fargs = [args[x.i] if isinstance(x, _Arg) else x for x in self.pargs] + args[self.maxi+1:]
--> 208 return self.fn(*fargs, **kwargs)
209
210 # Cell
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in _call_one(self, event_name)
229 def _call_one(self, event_name):
230 assert hasattr(event, event_name)
--> 231 [cb(event_name) for cb in sort_by_run(self.cbs)]
232
233 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in <listcomp>(.0)
229 def _call_one(self, event_name):
230 assert hasattr(event, event_name)
--> 231 [cb(event_name) for cb in sort_by_run(self.cbs)]
232
233 def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in __call__(self, event_name)
23 _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
24 (self.run_valid and not getattr(self, 'training', False)))
---> 25 if self.run and _run: getattr(self, event_name, noop)()
26
27 def __setattr__(self, name, value):
/usr/local/lib/python3.6/dist-packages/fastai2/learner.py in after_fit(self)
91 "Concatenate all recorded tensors"
92 if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim))
---> 93 if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim))
94 if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim))
95 if self.with_loss: self.losses = to_concat(self.losses)
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in __getattr__(self, k)
221 attr = getattr(self,self._default,None)
222 if attr is not None: return getattr(attr, k)
--> 223 raise AttributeError(k)
224 def __dir__(self): return custom_dir(self, self._dir() if self._xtra is None else self._dir())
225 # def __getstate__(self): return self.__dict__
AttributeError: preds
learn = torch.load(path/'export.pkl')
pred_class,pred_idx,outputs = learn.predict(path/'ClassA'/'WhatsApp Image 2020-02-01 at 08.58.27.jpeg')
The error that occurs is:
learn = torch.load(path/'export.pkl')
Could not do one pass in your dataloader, there is something wrong in it
IndexError Traceback (most recent call last)
in ()
1 learn = torch.load(path/'export.pkl')
----> 2 pred_class,pred_idx,outputs = learn.predict(path/'Harshita'/'WhatsApp Image 2020-02-01 at 08.58.27.jpeg')
11 frames
/usr/local/lib/python3.6/dist-packages/fastcore/foundation.py in _get(self, i)
318
319 def _get(self, i):
--> 320 if is_indexer(i) or isinstance(i,slice): return getattr(self.items,'iloc',self.items)[i]
321 i = mask2idxs(i)
322 return (self.items.iloc[list(i)] if hasattr(self.items,'iloc')
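One thing worth trying (it may or may not be the cause here): fastai2 provides load_learner for re-loading an exported Learner, rather than calling torch.load directly:
from fastai2.learner import load_learner

learn = load_learner(path/'export.pkl')
pred_class, pred_idx, outputs = learn.predict(path/'ClassA'/'WhatsApp Image 2020-02-01 at 08.58.27.jpeg')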
When exporting a trained model:
learn.export('models/unet-basic-v3')
---------------------------------------------------------------------------
PicklingError Traceback (most recent call last)
<ipython-input-16-7bf582ede4a1> in <module>
1 learn.save('unet-basic-v3')
----> 2 learn.export('models/unet-basic-v3')
~/daisy-gan/venv/lib/python3.6/site-packages/fastai2/learner.py in export(self, fname)
580 #To avoid the warning that come from PyTorch about model not being checked
581 warnings.simplefilter("ignore")
--> 582 torch.save(self, self.path/fname)
583 self.create_opt()
584 self.opt.load_state_dict(state)
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol)
258 >>> torch.save(x, buffer)
259 """
--> 260 return _with_file_like(f, "wb", lambda f: _save(obj, f, pickle_module, pickle_protocol))
261
262
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in _with_file_like(f, mode, body)
183 f = open(f, mode)
184 try:
--> 185 return body(f)
186 finally:
187 if new_fd:
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in <lambda>(f)
258 >>> torch.save(x, buffer)
259 """
--> 260 return _with_file_like(f, "wb", lambda f: _save(obj, f, pickle_module, pickle_protocol))
261
262
~/daisy-gan/venv/lib/python3.6/site-packages/torch/serialization.py in _save(obj, f, pickle_module, pickle_protocol)
330 pickler = pickle_module.Pickler(f, protocol=pickle_protocol)
331 pickler.persistent_id = persistent_id
--> 332 pickler.dump(obj)
333
334 serialized_storage_keys = sorted(serialized_storages.keys())
PicklingError: Can't pickle <function <lambda> at 0x7fba35a15f28>: attribute lookup <lambda> on __main__ failed
This is probably related to scikit-learn/scikit-learn#9467, as fastai2 seems to make heavy use of lambda functions. I will investigate where this comes from. This is probably higher priority for me than #60.
This error can be triggered by simply including this line at the end of the camvid example.
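Since pickle refuses lambdas by design, a hedged general workaround is to replace any lambda attached to the Learner (e.g. in a transform or loss) with a named, module-level function. A minimal demonstration of the same failure:
import pickle

def scale(x): return x / 255.0   # named, module-level: pickled by reference, works
unnamed = lambda x: x / 255.0    # __qualname__ is '<lambda>': the reference lookup fails

pickle.dumps(scale)              # fine
pickle.dumps(unnamed)            # PicklingError, same message as learn.export() above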
I am trying to use the SchedLin callback:
lr_max = np.array([h['lr'] for h in learn.opt.hypers])
scheds = {'lr': combine_scheds([pct_start, 1 - pct_start], [SchedLin(lr_max / 10, lr_max), SchedCos(lr_max, 1e-4)])}
learn.fit(n_epoch, cbs=ParamScheduler(scheds), wd=wd)
and I get an error:
File "\fastai2\callback\schedule.py", line 20, in SchedLin
return start + pos*(end-start)
TypeError: mul(): argument 'other' (position 1) must be Tensor, not numpy.ndarray
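A hedged workaround, untested against this exact setup: build lr_max as a torch tensor instead of a numpy array, so the start + pos*(end-start) arithmetic inside SchedLin stays Tensor-only:
import torch
lr_max = torch.tensor([h['lr'] for h in learn.opt.hypers])  # instead of np.array(...)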