I am trying to run a text summarization "t5-base" model. The code used to work when I first ran it, but after installing/reinstalling some packages it no longer works. Can anyone please tell me how to resolve this issue?
Here is my code:
import torch
from transformers import AutoModel, AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('t5-base')
model = AutoModelWithLMHead.from_pretrained('t5-base', return_dict=True)
# `text` is assumed to already hold the article to summarize
inputs = tokenizer.encode("summarize: " + text,
                          return_tensors='pt',
                          max_length=512,
                          truncation=True)
summary_ids = model.generate(inputs, max_length=150, min_length=80, length_penalty=5., num_beams=2)
text = tokenizer.decode(summary_ids[0])
text = text.replace("<pad>", "").replace("</s>", "")
text
Below is the error message I get:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-46-2c9eeafa599f> in <module>
1 import torch
----> 2 from transformers import AutoModel, AutoTokenizer
~/opt/anaconda3/lib/python3.7/site-packages/transformers/__init__.py in <module>
29 # Check the dependencies satisfy the minimal versions required.
30 from . import dependency_versions_check
---> 31 from .utils import (
32 _LazyModule,
33 is_flax_available,
ImportError: cannot import name '_LazyModule' from 'transformers.utils' (/Users/sangjinlee/opt/anaconda3/lib/python3.7/site-packages/transformers/utils/__init__.py)
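The traceback fails inside transformers' own __init__.py, which usually indicates a mixed installation left over from the install/reinstall described above (an assumption here, since the exact package history isn't shown). A minimal first step to try is a clean reinstall:
!pip uninstall -y transformers
!pip install -U transformers
# restart the kernel afterwards so the fresh install is actually imported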
While using this code, I get a Pillow error. I tried re-installing Pillow but I'm still struggling with the issue. Any help getting this code to run?
import layoutparser as lp

model = lp.Detectron2LayoutModel(
    config_path='lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config',            # In model catalog
    label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},  # In model `label_map`
    extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],                # Optional
)
model.detect(image)
Getting this error:
ImportError Traceback (most recent call last)
<ipython-input-6-59f0fb07b7e3> in <module>
1 import layoutparser as lp
----> 2 model = lp.Detectron2LayoutModel(
3 config_path ='lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config', # In model catalog
4 label_map ={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"}, # In model`label_map`
5 extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8] # Optional
31 frames
/usr/local/lib/python3.7/dist-packages/PIL/ImageFont.py in <module>
35 from . import Image
36 from ._deprecate import deprecate
---> 37 from ._util import is_directory, is_path
38
39
ImportError: cannot import name 'is_directory' from 'PIL._util' (/usr/local/lib/python3.7/dist-packages/PIL/_util.py)
Run the command below before installing the library:
!pip install fastcore -U
I have been trying to import a .tif into Python using the following code:
import geemap

image = 'location/map_Western_Cape.tif'  # use same location as above
geemap.plot_raster(image, cmap='terrain', figsize=(15, 10))

image_Gauteng = 'location/map_Gauteng.tif'  # use same location as above
geemap.plot_raster(image_Gauteng, cmap='terrain', figsize=(15, 10))  # plot the second raster
However, I keep getting this error message:
AttributeError Traceback (most recent call last)
Input In [11], in <cell line: 4>()
1 # Load the .tif from local machine
3 image = 'C:/Users/kiral/OneDrive - Stellenbosch University/BScHons Global Change/Guy/map_Western_Cape.tif' #use same location as above
----> 4 geemap.plot_raster(image, cmap='terrain', figsize=(15, 10))
6 image_Gauteng = 'C:/Users/kiral/OneDrive - Stellenbosch University/BScHons Global Change/Guy/map_Gauteng.tif' #use same location as above
7 geemap.plot_raster(image, cmap='terrain', figsize=(15, 10))
File ~\anaconda3\lib\site-packages\geemap\common.py:12559, in plot_raster(image, band, cmap, proj, figsize, open_kwargs, **kwargs)
12557 try:
12558 import pvxarray
> 12559 import rioxarray
12560 import xarray
12561 except ImportError:
File ~\anaconda3\lib\site-packages\rioxarray\__init__.py:6, in <module>
3 __author__ = """rioxarray Contributors"""
4 __email__ = "alansnow21@gmail.com"
----> 6 import rioxarray.raster_array # noqa
7 import rioxarray.raster_dataset # noqa
8 from rioxarray._io import open_rasterio # noqa
File ~\anaconda3\lib\site-packages\rioxarray\raster_array.py:28, in <module>
14 from typing import (
15 Any,
16 Dict,
(...)
24 Union,
25 )
27 import numpy as np
---> 28 import rasterio
29 import rasterio.mask
30 import rasterio.warp
File ~\anaconda3\lib\site-packages\rasterio\__init__.py:44, in <module>
41 import warnings
43 import rasterio._loading
---> 44 with rasterio._loading.add_gdal_dll_directories():
45 from rasterio._show_versions import show_versions
46 from rasterio._version import gdal_version, get_geos_version, get_proj_version
AttributeError: partially initialized module 'rasterio' has no attribute '_loading' (most likely due to a circular import)
I have tried installing all of the listed modules, but that produced a bunch of errors too.
Could anyone recommend a way to import these tifs?
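One way to narrow this down (an assumption: the circular-import message suggests the rasterio/GDAL installation itself is broken or shadowed, rather than geemap) is to import rasterio on its own, outside of geemap:
# Isolation test: if this alone raises the same "partially initialized module
# 'rasterio'" error, the problem is the rasterio/GDAL installation (or a local
# file named rasterio.py shadowing the package), not geemap.plot_raster.
import rasterio

with rasterio.open('location/map_Western_Cape.tif') as src:  # same path as in the question
    print(src.count, src.width, src.height)  # number of bands and raster size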
I downloaded the code from https://github.com/SpaceNetChallenge/SpaceNet_SAR_Buildings_Solutions, specifically model 1. I downloaded the corresponding weights and created the following file to load the model and test it. First, I copied the Unet part of main.py into a separate file, umodel.py, and wrote the test file as follows:
import torch
exec(open("./umodel.py").read())
network_data = torch.load('snapshot_fold_8_best')
print(network_data.keys())
import sys
sys.path.append("geffnet")

class Namespace:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

args = Namespace(extra_num=1,
                 dec_ch=[32, 64, 128, 256, 1024],
                 stride=32,
                 net='b5',
                 bot1x1=True,
                 glob=True,
                 bn=True,
                 aspp=True,
                 ocr=True,
                 aux_scale=True)

def load_state_dict(model, state_dict):
    missing_keys = []
    # from UnetOS.umodel import Unet
    exec(open("./umodel.py").read())
    try:
        from torch.hub import load_state_dict_from_url
    except ImportError:
        from torch.utils.model_zoo import load_url as load_state_dict_from_url

# from UnetOS.umodel import *
model = Unet(extra_num=args.extra_num, dec_ch=args.dec_ch, stride=args.stride, net=args.net,
             bot1x1=args.bot1x1, glob=args.glob, bn=args.bn, aspp=args.aspp,
             ocr=args.ocr, aux=args.aux_scale > 0).cuda()
load_state_dict(model, network_data)
My question is: why does exec(open("./umodel.py").read()) work fine, but whenever I try from umodel import Unet I get errors?
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_10492/1282530406.py in <module>
9 # ah
10 model = Unet(extra_num = args.extra_num, dec_ch = args.dec_ch, stride=args.stride, net=args.net, bot1x1 = args.bot1x1, glob=args.glob, bn=args.bn, aspp=args.aspp,
---> 11 ocr=args.ocr, aux = args.aux_scale > 0).cuda()
12 #model = Unet()
13 #print(network_data.key())
D:\hines\Pretrained\1-zbigniewwojna\UnetOS\umodel.py in __init__(self, extra_num, dec_ch, stride, net, bot1x1, glob, bn, aspp, ocr, aux)
238 ['ir_r4_k5_s2_e6_c192_se0.25'],
239 ['ir_r1_k3_s1_e6_c320_se0.25']]
--> 240 enc = GenEfficientNet(in_chans=3, block_args=decode_arch_def(arch_def, depth_multiplier),
241 num_features=round_channels(1280, channel_multiplier, 8, None), stem_size=32,
242 channel_multiplier=channel_multiplier, act_layer=resolve_act_layer({}, 'swish'),
NameError: name 'decode_arch_def' is not defined
The main file is here: https://github.com/SpaceNetChallenge/SpaceNet_SAR_Buildings_Solutions/blob/master/1-zbigniewwojna/main.py
From the error message it appears that decode_arch_def is not available, and looking at your imports it has to come from from geffnet.efficientnet_builder import * (it does: https://github.com/rwightman/gen-efficientnet-pytorch/blob/master/geffnet/efficientnet_builder.py).
Your exec must have worked because it ran after a similar import had already brought decode_arch_def into scope: exec() executes code in the current scope, so in that scope decode_arch_def was already defined.
However, when you import, the imported module itself doesn't have the names it needs in its own scope. Add the required import statements to the file you're importing so they are in scope there, and it should work.
For example, with a mod.py containing this:
def my_func():
    print(datetime.now())
This works:
from datetime import datetime
exec(open("./mod.py").read())
my_func()
But this does not:
from datetime import datetime
import mod
mod.my_func()
To make that work, mod.py would have to be:
from datetime import datetime

def my_func():
    print(datetime.now())
And the import of datetime wouldn't be needed in the main program, since it's not referenced there. Your code has a similar issue - you need to determine all the dependencies of your Unet class and import them.
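For the code in this question, a minimal sketch (assuming umodel.py is the verbatim copy of the Unet code from main.py, and that decode_arch_def and the related helpers come from geffnet as noted above) would put the imports at the top of umodel.py itself:
# umodel.py -- sketch; the Unet/GenEfficientNet code itself stays exactly as copied
import torch
import torch.nn as nn
from geffnet.efficientnet_builder import *  # provides decode_arch_def, round_channels, resolve_act_layer, ...

class Unet(nn.Module):
    # ... body copied unchanged from main.py ...
    pass
With the imports living inside umodel.py, from umodel import Unet resolves decode_arch_def in the module's own namespace, exactly as the exec() version did, so the NameError should go away.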
I have the following block of code. I want to use ICCLIM (Indice Calculation CLIMate), a Python library, to calculate some climate indices. How do I solve this problem?
files = ['tasmax_day_CNRM-CM5_historical_r1i1p1_19950101-19991231.nc',
         'tasmax_day_CNRM-CM5_historical_r1i1p1_20000101-20041231.nc',
         'tasmax_day_CNRM-CM5_historical_r1i1p1_20050101-20051231.nc']

dt1 = datetime.datetime(1998, 1, 1)
dt2 = datetime.datetime(2005, 12, 31)
out_f = 'SU_JJA_CNRM-CM5_historical_r1i1p1_1998-2005.nc'  # OUTPUT FILE: summer season values of SU

icclim.index(index_name='SU', in_files=files, var_name='tasmax', time_range=[dt1, dt2], slice_mode='JJA', out_file=out_f)
and the import list is
import numpy
import cython
import netcdftime
import cftime
import netCDF4
import sys
import glob
import os
import datetime
import dask
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray
import nc_time_axis
import logging
import pytest
import setuptools
import xclim
import icclim
When I run the code, the error keeps showing up. Here is the error:
AttributeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_2572/579676545.py in <module>
2 dt2 = datetime.datetime(1986,12,31)
3
----> 4 icclim.index(index_name='CD', in_files='C:/Users/Dana/Desktop/icclim/pr_year_1986.nc',time_range=[dt1, dt2], var_name='pr', slice_mode='year', out_file='C:/Users/Dana/Desktop/icclim/new_pr_year_1986.nc')
C:\ProgramData\Anaconda\lib\site-packages\icclim\main.py in index(in_files, var_name, index_name, slice_mode, time_range, out_file, threshold, transfer_limit_Mbytes, callback, callback_percentage_start_value, callback_percentage_total, base_period_time_range, window_width, only_leap_years, ignore_Feb29th, interpolation, out_unit, netcdf_version, user_index, save_percentile, logs_verbosity, indice_name, user_indice)
127 log.set_verbosity(logs_verbosity)
128
--> 129 log.start_message()
130 callback(callback_percentage_start_value)
131 if indice_name is not None:
C:\ProgramData\Anaconda\lib\site-packages\icclim\icclim_logger.py in start_message(self)
62
63 # flake8: noqa
---> 64 time_now = time.asctime(time.gmtime()) + " " + self.timezone
65 if self.verbosity == Verbosity.SILENT:
66 return
AttributeError: 'IcclimLogger' object has no attribute 'timezone'
Sorry for the late reply. This was due to a bug within icclim and it has been fixed in a later version.
I advise you to try the latest version (5.1.0), available on PyPI or conda-forge.
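For example, in a notebook (assuming a pip-based environment; on conda-forge the equivalent is conda install -c conda-forge icclim):
!pip install -U icclim  # 5.1.0 or later includes the fix for the missing timezone attribute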
NameError Traceback (most recent call last)
<ipython-input-13-94eb7b0557cb> in <module>
----> 1 from fastai.vision import *
2 import pickle as pkl
3 import builtins
~/anaconda3/envs/FGMLCI/lib/python3.6/site-packages/fastai/vision/__init__.py in <module>
10 from .. import vision
11
---> 12 __all__ = [*basics.__all__, *learner.__all__, *data.__all__, *image.__all__, *transform.__all__, *tta.__all__, 'models', 'vision']
13
NameError: name 'learner' is not defined
--------------------------------------
Versions:
fastai==1.0.60
torch==1.2.0
torchvision==0.4.0a0+6b959ee
This happened to me with fastai version 2; my problem was that I had deleted this import by accident:
from fastai.vision.all import *
This is just for those who hit this problem in version 2.