Huggingface SciBERT predict masked word not working - python

I am trying to use the pretrained SciBERT model (https://huggingface.co/allenai/scibert_scivocab_uncased) from Huggingface to predict masked words in scientific/biomedical text. This produces errors, and I am not sure how to move forward from this point.
Here is the code so far -
!pip install transformers
from transformers import pipeline, AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
model = AutoModel.from_pretrained("allenai/scibert_scivocab_uncased")
unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer)
unmasker("the patient is a 55 year old [MASK] admitted with pneumonia")
This works with plain BERT, but that is not the specialized pretrained model I want -
!pip install transformers
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
unmasker("the patient is a 55 year old [MASK] admitted with pneumonia")
The error with SciBERT is -
/usr/local/lib/python3.7/dist-packages/transformers/pipelines/__init__.py in pipeline(task, model, config, tokenizer, feature_extractor, framework, revision, use_fast, use_auth_token, model_kwargs, **kwargs)
494 kwargs["feature_extractor"] = feature_extractor
495
--> 496 return task_class(model=model, framework=framework, task=task, **kwargs)
/usr/local/lib/python3.7/dist-packages/transformers/pipelines/fill_mask.py in __init__(self, model, tokenizer, modelcard, framework, args_parser, device, top_k, task)
73 )
74
---> 75 self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
76 self.top_k = top_k
77
/usr/local/lib/python3.7/dist-packages/transformers/pipelines/base.py in check_model_type(self, supported_models)
652 self.task,
653 self.model.base_model_prefix,
--> 654 f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
655 )
656
PipelineException: The model 'BertModel' is not supported for fill-mask. Supported models are ['BigBirdForMaskedLM', 'Wav2Vec2ForMaskedLM', 'ConvBertForMaskedLM', 'LayoutLMForMaskedLM', 'DistilBertForMaskedLM', 'AlbertForMaskedLM', 'BartForConditionalGeneration', 'MBartForConditionalGeneration', 'CamembertForMaskedLM', 'XLMRobertaForMaskedLM', 'LongformerForMaskedLM', 'RobertaForMaskedLM', 'SqueezeBertForMaskedLM', 'BertForMaskedLM', 'MegatronBertForMaskedLM', 'MobileBertForMaskedLM', 'FlaubertWithLMHeadModel', 'XLMWithLMHeadModel', 'ElectraForMaskedLM', 'ReformerForMaskedLM', 'FunnelForMaskedLM', 'MPNetForMaskedLM', 'TapasForMaskedLM', 'DebertaForMaskedLM', 'DebertaV2ForMaskedLM', 'IBertForMaskedLM']

As the error message tells you, you need to use AutoModelForMaskedLM:
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
model = AutoModelForMaskedLM.from_pretrained("allenai/scibert_scivocab_uncased")
unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer)
unmasker("the patient is a 55 year old [MASK] admitted with pneumonia")
Output:
[{'sequence': 'the patient is a 55 year old woman admitted with pneumonia',
  'score': 0.4025486707687378,
  'token': 10221,
  'token_str': 'woman'},
 {'sequence': 'the patient is a 55 year old man admitted with pneumonia',
  'score': 0.23970800638198853,
  'token': 508,
  'token_str': 'man'},
 {'sequence': 'the patient is a 55 year old female admitted with pneumonia',
  'score': 0.15444642305374146,
  'token': 3672,
  'token_str': 'female'},
 {'sequence': 'the patient is a 55 year old male admitted with pneumonia',
  'score': 0.1111455038189888,
  'token': 3398,
  'token_str': 'male'},
 {'sequence': 'the patient is a 55 year old boy admitted with pneumonia',
  'score': 0.015877680853009224,
  'token': 12481,
  'token_str': 'boy'}]
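Note: you can also skip loading the model and tokenizer yourself and pass the checkpoint name straight to the pipeline, exactly as in the bert-base-uncased snippet above; the pipeline then picks a model class with the masked-LM head for you. A minimal sketch:

from transformers import pipeline

# the pipeline resolves the checkpoint to a fill-mask capable model itself
unmasker = pipeline('fill-mask', model='allenai/scibert_scivocab_uncased')
unmasker("the patient is a 55 year old [MASK] admitted with pneumonia")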

Related

BulkIndexError: ('2 document(s) failed to index.') - Elasticsearch + Python

At first I found some null values in my preprocessed data, so I removed those.
(Here's my Data Cleaning Code - and the respective outputs enclosed in '''Comments''')
Cleaning and Preprocessing
import numpy as np  # needed for np.where below

df_merged[df_merged.abstract_x != df_merged.abstract_y].shape
#this means that out of the 25000 samples, the abstract does not match between metadata and pdf data
'''(22728, 22)'''
# check metadata abstract column to see if null values exist
df_merged.abstract_x.isnull().sum()
'''3363'''
# Check pdf_json abstract to see if null values exist
df_merged.abstract_y.isnull().sum()
'''0'''
#Since the abstract_x from metadata is more reliable, we will use it, but fill in the abstract_y text where abstract_x is null
# Convert all columns to string and then replace abstract_y values
#df = df.astype(str)
df_merged['abstract_y'] = df_merged['abstract_y'].astype(str)
df_merged['abstract_y'] = np.where(df_merged['abstract_y'].map(len) > 50, df_merged['abstract_y'], 'na')
#we want to overwrite abstract_x, but only where abstract_y is not 'na'
fill_mask = df_merged.abstract_x.isnull() & (df_merged.abstract_y != 'na')
df_merged.loc[fill_mask, 'abstract_x'] = df_merged.loc[fill_mask, 'abstract_y']
df_merged.abstract_x.isnull().sum()
'''
2745
'''
df_merged.rename(columns={'abstract_x': 'abstract'}, inplace=True)
df_merged.columns
'''
Index(['cord_uid', 'sha', 'source_x', 'title', 'doi', 'pmcid', 'pubmed_id',
'license', 'abstract', 'publish_time', 'authors', 'journal', 'mag_id',
'who_covidence_id', 'arxiv_id', 'pdf_json_files', 'pmc_json_files',
'url', 's2_id', 'abstract_y', 'body_text_x', 'body_text_y'],
dtype='object')
'''
df_merged = df_merged.drop(['abstract_y'], axis=1)
df_merged.columns
'''
Index(['cord_uid', 'sha', 'source_x', 'title', 'doi', 'pmcid', 'pubmed_id',
'license', 'abstract', 'publish_time', 'authors', 'journal', 'mag_id',
'who_covidence_id', 'arxiv_id', 'pdf_json_files', 'pmc_json_files',
'url', 's2_id', 'body_text_x', 'body_text_y'],
dtype='object')
'''
(df_merged.body_text_x != df_merged.body_text_y).sum()
'''25000'''
df_merged.body_text_x.isnull().sum()
'''1526'''
df_merged.body_text_y.isnull().sum()
'''5238'''
df_merged[df_merged.body_text_x.isnull() & df_merged.body_text_y.notnull()].shape
'''(1447, 21)'''
#when body_text_y is not null, put body_text_y into body_text_x
df_merged.loc[df_merged.body_text_y.notnull(), 'body_text_x'] = df_merged.loc[df_merged.body_text_y.notnull(), 'body_text_y']
df_merged.body_text_x.isnull().sum()
'''79'''
df_merged.columns
'''
Index(['cord_uid', 'sha', 'source_x', 'title', 'doi', 'pmcid', 'pubmed_id',
'license', 'abstract', 'publish_time', 'authors', 'journal', 'mag_id',
'who_covidence_id', 'arxiv_id', 'pdf_json_files', 'pmc_json_files',
'url', 's2_id', 'body_text_x', 'body_text_y'],
dtype='object')
'''
df_merged.rename(columns={'body_text_x': 'body_text'}, inplace=True)
df_merged = df_merged.drop(['body_text_y'], axis=1)
df_merged.columns
'''
Index(['cord_uid', 'sha', 'source_x', 'title', 'doi', 'pmcid', 'pubmed_id',
'license', 'abstract', 'publish_time', 'authors', 'journal', 'mag_id',
'who_covidence_id', 'arxiv_id', 'pdf_json_files', 'pmc_json_files',
'url', 's2_id', 'body_text'],
dtype='object')
'''
df_final = df_merged[['sha', 'title', 'abstract', 'publish_time', 'authors', 'url', 'body_text']]
df_final.head()
'''
sha title abstract publish_time authors url body_text
0 1cbf95a2c3a39e5cc80a5c4c6dbcec7cc718fd59 Genomic Evolution of Severe Acute Respiratory ... Abstract Recent emergence of severe acute resp... 2020-08-31 Jacob, Jobin John; Vasudevan, Karthick; Veerar... https://api.elsevier.com/content/article/pii/S... The outbreak of severe acute respiratory syndr...
1 7dc6943ca46a1093ece2594002d61efdf9f51f28 Impact of COVID-19 on COPD and Asthma admissio... Asthma and Chronic Obstructive Pulmonary Disea... 2020-12-10 Sykes, Dominic L; Faruqi, Shoaib; Holdsworth, ... https://www.ncbi.nlm.nih.gov/pubmed/33575313/;... The COVID-19 pandemic has led to an overall re...
2 5b127336f68f3dca83981d0142eda472634378f0 Programmable System of Cas13-Mediated RNA Modi... Clustered regularly interspaced short palindro... 2021-07-27 Tang, Tian; Han, Yingli; Wang, Yuran; Huang, H... https://www.ncbi.nlm.nih.gov/pubmed/34386490/;... Prokaryotic clustered regularly interspaced sh...
3 aafbe282248436380dd737bae844725882df2249 Are You Tired of Working amid the Pandemic? Th... With the outbreak of novel coronavirus in 2019... 2020-12-09 Chen, Huaruo; Liu, Fan; Pang, Liman; Liu, Fei;... https://doi.org/10.3390/ijerph17249188; https:... In the outbreak of novel coronavirus pneumonia...
4 4013a7e351c40d2bb7fdfe7f185d2ef9b1a872e6 Viral Sepsis in Children Sepsis in children is typically presumed to be... 2018-09-18 Gupta, Neha; Richter, Robert; Robert, Stephen;... https://www.ncbi.nlm.nih.gov/pubmed/30280095/;... The true incidence of viral sepsis, particular...
'''
df_final = df_final.dropna(axis=0,subset=['abstract', 'body_text'])
df_final.isnull().sum()
'''
sha 0
title 0
abstract 0
publish_time 0
authors 104
url 0
body_text 0
dtype: int64
'''
df_final.shape
'''(22186, 7)'''
df_final.to_csv('FINAL_CORD_DATA.csv', index=False)
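As an aside, the abstract null-filling above could be written more compactly with pandas combine_first, which keeps abstract_x and falls back to abstract_y only where abstract_x is null; a sketch, assuming the length-50 filter on abstract_y is not needed:

# sketch only, not the original notebook code
df_merged['abstract_x'] = df_merged['abstract_x'].combine_first(df_merged['abstract_y'])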
Whenever I try to index the sample dataset that I created in my es_populate notebook with the sparse retriever, I keep getting
BulkIndexError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_19912/2533749049.py in <module>
----> 1 document_store.write_documents(final_dicts)
~\anaconda3\lib\site-packages\haystack\document_store\elasticsearch.py in write_documents(self, documents, index, batch_size, duplicate_documents)
426 # Pass batch_size number of documents to bulk
427 if len(documents_to_index) % batch_size == 0:
--> 428 bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type)
429 documents_to_index = []
430
~\anaconda3\lib\site-packages\elasticsearch\helpers\actions.py in bulk(client, actions, stats_only, *args, **kwargs)
388 # make streaming_bulk yield successful results so we can count them
389 kwargs["yield_ok"] = True
--> 390 for ok, item in streaming_bulk(client, actions, *args, **kwargs):
391 # go through request-response pairs and detect failures
392 if not ok:
~\anaconda3\lib\site-packages\elasticsearch\helpers\actions.py in streaming_bulk(client, actions, chunk_size, max_chunk_bytes, raise_on_error, expand_action_callback, raise_on_exception, max_retries, initial_backoff, max_backoff, yield_ok, *args, **kwargs)
309
310 try:
--> 311 for data, (ok, info) in zip(
312 bulk_data,
313 _process_bulk_chunk(
~\anaconda3\lib\site-packages\elasticsearch\helpers\actions.py in _process_bulk_chunk(client, bulk_actions, bulk_data, raise_on_exception, raise_on_error, *args, **kwargs)
245 resp=resp, bulk_data=bulk_data, raise_on_error=raise_on_error
246 )
--> 247 for item in gen:
248 yield item
249
~\anaconda3\lib\site-packages\elasticsearch\helpers\actions.py in _process_bulk_chunk_success(resp, bulk_data, raise_on_error)
186
187 if errors:
--> 188 raise BulkIndexError("%i document(s) failed to index." % len(errors), errors)
189
190
BulkIndexError: ('2 document(s) failed to index.', [{'index': {'_index': 'document', '_type': '_doc', '_id': '9d04e1c37a299818d82416898ffe22d6', 'status': 400, 'error': {'type': 'mapper_parsing_exception', 'reason': 'failed to parse', 'caused_by': {'type': 'json_parse_exception', 'reason': "Non-standard token 'NaN': enable JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS to allow\n at [Source: (ByteArrayInputStream); line: 1, column: 217076]"}}, 'data': {'text': 'Increase
My method of using the document store was:
# Connect to Elasticsearch
from haystack.document_store import ElasticsearchDocumentStore
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document")
C:\Users\manan\anaconda3\lib\site-packages\elasticsearch\connection\base.py:190: ElasticsearchDeprecationWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.17/security-minimal-setup.html to enable security.
warnings.warn(message, category=ElasticsearchDeprecationWarning)
02/20/2022 00:58:28 - INFO - elasticsearch - HEAD http://localhost:9200/ [status:200 request:0.227s]
02/20/2022 00:58:28 - INFO - elasticsearch - HEAD http://localhost:9200/document [status:200 request:0.015s]
02/20/2022 00:58:28 - INFO - elasticsearch - GET http://localhost:9200/document [status:200 request:0.011s]
02/20/2022 00:58:28 - INFO - elasticsearch - PUT http://localhost:9200/document/_mapping [status:200 request:0.087s]
02/20/2022 00:58:28 - INFO - elasticsearch - HEAD http://localhost:9200/label [status:200 request:0.006s]
document_store.write_documents(final_dicts)
02/20/2022 00:58:34 - INFO - elasticsearch - POST http://localhost:9200/_bulk?refresh=wait_for [status:200 request:3.887s]
02/20/2022 00:58:38 - INFO - elasticsearch - POST http://localhost:9200/_bulk?refresh=wait_for [status:200 request:3.464s]
followed by the above error.
I'm very new to this, and would appreciate any help that could come my way.
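The json_parse_exception in the traceback ("Non-standard token 'NaN'") suggests that some field in final_dicts contains a float NaN, which Elasticsearch cannot parse as JSON; the authors column above still had 104 nulls, and pandas writes those out as NaN. A minimal sketch of one way to clear them before indexing, assuming final_dicts is built from the saved CSV (the dict layout below is a guess based on the 'text' key visible in the error, not the poster's actual code):

import pandas as pd

df = pd.read_csv('FINAL_CORD_DATA.csv')
df = df.where(df.notnull(), None)  # turn float NaN into None so it serializes as JSON null
final_dicts = [{'text': row['body_text'],
                'meta': {'title': row['title'], 'authors': row['authors']}}
               for _, row in df.iterrows()]
document_store.write_documents(final_dicts)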

Getting AttributeError: 'TestInventorySellStock' object has no attribute 'balance_inventory' while testing below pytest framework

I am trying to test a framework containing 3 classes, as shown below. While running the test using pytest, I am getting 'AttributeError: 'TestInventorySellStock' object has no attribute 'balance_inventory'', as shown below. 'balance_inventory' is defined in the 2nd class, 'MobileInventory', but somehow I am not able to reference it in the 3rd class, 'TestInventorySellStock'. Can someone please help point out where I am going wrong?
P.S.: Apologies in advance if I missed adding any info. Please let me know in case I need to add any other details.
1st Class:
class InsufficientException(Exception):
    pass
2nd class:
class MobileInventory:
    def __init__(self, inventory=None):
        if inventory is None:
            self.balance_inventory = {}
        else:
            if not isinstance(inventory, dict):
                raise TypeError("Input inventory must be a dictionary")
            for model in inventory:
                if not isinstance(model, str):
                    raise ValueError("Mobile model name must be a string")
                if not isinstance(inventory[model], int) or inventory[model] < 0:
                    raise ValueError("No. of mobiles must be a positive integer")
            self.balance_inventory = inventory

    def sell_stock(self, requested_stock):
        if not isinstance(requested_stock, dict):
            raise TypeError("Requested stock must be a dictionary")
        for model in requested_stock:
            if not isinstance(model, str):
                raise ValueError("Mobile model name must be a string")
            if not isinstance(requested_stock[model], int) or requested_stock[model] < 0:
                raise ValueError("No. of mobiles must be a positive integer")
            if model not in self.balance_inventory:
                raise InsufficientException("No Stock. New Model Request")
            if requested_stock[model] > self.balance_inventory[model]:
                raise InsufficientException("Insufficient Stock")
            self.balance_inventory[model] -= requested_stock[model]
3rd Class:
import pytest

class TestInventorySellStock:
    @classmethod
    def setup_class(cls):
        cls.inventory = MobileInventory({'iPhone Model A': 50, 'Xiaomi Model B': 2000, 'Nokia Model C': 10, 'Sony Model D': 1})

    def test_sell_stock_as_dict(self):
        self.inventory.sell_stock({'iPhone Model A': 2, 'Xiaomi Model B': 20, 'Sony Model D': 1})
        assert self.inventory.balance_inventory == {'iPhone Model A': 48, 'Xiaomi Model B': 1980, 'Nokia Model C': 10,
                                                    'Sony Model D': 0}

    def test_sell_stock_with_float_values(self):
        with pytest.raises(ValueError) as e:
            MobileInventory.sell_stock(self, {'iPhone Model A': 2.5, 'Xiaomi Model B': 3.1, 'Nokia Model C': 4})
        assert str(e.value) == 'No. of mobiles must be a positive integer'

    def test_sell_stock_of_nonexisting_model(self):
        with pytest.raises(InsufficientException) as e:
            MobileInventory.sell_stock(self, {'iPhone Model B': 2, 'Xiaomi Model B': 5})
        assert str(e.value) == 'No Stock. New Model Request'

    def test_sell_stock_of_insufficient_stock(self):
        with pytest.raises(InsufficientException) as e:
            MobileInventory.sell_stock(self, {'iPhone Model A': 2, 'Xiaomi Model B': 5, 'Nokia Model C': 15})
        assert str(e.value) == 'Insufficient Stock'
Complete error details: (the traceback screenshot is not reproduced here)
As @ForceBru suggested in the comments, the code worked after I replaced 'MobileInventory' with 'self.inventory' in the calls inside the 3rd (test) class. This is how instances of a class should be used in Python; more knowledge of OOP would be required to solve such problems.
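For illustration, here is one of the failing tests rewritten per that fix, calling sell_stock on the instance created in setup_class (a sketch of the accepted change):

def test_sell_stock_with_float_values(self):
    with pytest.raises(ValueError) as e:
        self.inventory.sell_stock({'iPhone Model A': 2.5, 'Xiaomi Model B': 3.1, 'Nokia Model C': 4})
    assert str(e.value) == 'No. of mobiles must be a positive integer'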

Entity Recognition in Stanford NLP using Python

I am using Stanford CoreNLP from Python. I have taken the code from here.
Following is the code :
from stanfordcorenlp import StanfordCoreNLP
from collections import defaultdict  # needed by tokens_to_dict
import logging
import json

class StanfordNLP:
    def __init__(self, host='http://localhost', port=9000):
        self.nlp = StanfordCoreNLP(host, port=port,
                                   timeout=30000, quiet=True, logging_level=logging.DEBUG)
        self.props = {
            'annotators': 'tokenize,ssplit,pos,lemma,ner,parse,depparse,dcoref,relation,sentiment',
            'pipelineLanguage': 'en',
            'outputFormat': 'json'
        }

    def word_tokenize(self, sentence):
        return self.nlp.word_tokenize(sentence)

    def pos(self, sentence):
        return self.nlp.pos_tag(sentence)

    def ner(self, sentence):
        return self.nlp.ner(sentence)

    def parse(self, sentence):
        return self.nlp.parse(sentence)

    def dependency_parse(self, sentence):
        return self.nlp.dependency_parse(sentence)

    def annotate(self, sentence):
        return json.loads(self.nlp.annotate(sentence, properties=self.props))

    @staticmethod
    def tokens_to_dict(_tokens):
        tokens = defaultdict(dict)
        for token in _tokens:
            tokens[int(token['index'])] = {
                'word': token['word'],
                'lemma': token['lemma'],
                'pos': token['pos'],
                'ner': token['ner']
            }
        return tokens

if __name__ == '__main__':
    sNLP = StanfordNLP()
    text = r'China on Wednesday issued a $50-billion list of U.S. goods including soybeans and small aircraft for possible tariff hikes in an escalating technology dispute with Washington that companies worry could set back the global economic recovery.The country\'s tax agency gave no date for the 25 percent increase...'
    ANNOTATE = sNLP.annotate(text)
    POS = sNLP.pos(text)
    TOKENS = sNLP.word_tokenize(text)
    NER = sNLP.ner(text)
    PARSE = sNLP.parse(text)
    DEP_PARSE = sNLP.dependency_parse(text)
I am only interested in the entity recognition, which is saved in the variable NER. Printing NER gives the following result:
If I run the same text on the Stanford website, the output for NER is:
There are 2 problems with my Python code:
1. '$' and '50-billion' should be combined and tagged as a single entity. Similarly, I want '25' and 'percent' as a single entity, as shown in the online Stanford output.
2. In my output, 'Washington' is shown as State and 'China' is shown as Country. I want both of them to be shown as 'Loc', as in the Stanford website output. The possible solution to this problem lies in the documentation.
But I don't know which model I am using or how to change the model.
Here is a way you can solve this:
Make sure to download Stanford CoreNLP 3.9.1 and the necessary model jars.
Set up the server properties in this file, "ner-server.properties":
annotators = tokenize,ssplit,pos,lemma,ner
ner.applyFineGrained = false
Start the server with this command:
java -Xmx12g edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000 -serverProperties ner-server.properties
Make sure you've installed this Python package:
https://github.com/stanfordnlp/python-stanford-corenlp
Run this Python code:
import corenlp
client = corenlp.CoreNLPClient(start_server=False, annotators=["tokenize", "ssplit", "pos", "lemma", "ner"])
sample_text = "Joe Smith was born in Hawaii."
ann = client.annotate(sample_text)
for mention in ann.sentence[0].mentions:
    print([x.word for x in ann.sentence[0].token[mention.tokenStartInSentenceInclusive:mention.tokenEndInSentenceExclusive]])
Here are all the fields available in the EntityMention for each entity:
sentenceIndex: 0
tokenStartInSentenceInclusive: 5
tokenEndInSentenceExclusive: 7
ner: "MONEY"
normalizedNER: "$5.0E10"
entityType: "MONEY"

NaiveBayes Classifier

I am having a problem with a naive Bayes classifier. I am trying to analyse some sentences, but I get some errors in Python.
from naiveBayesClassifier.trainedData import TrainedData

class Trainer(object):
    """docstring for Trainer"""
    def __init__(self, tokenizer):
        super(Trainer, self).__init__()
        self.tokenizer = tokenizer
        self.data = TrainedData()

    def train(self, text, className):
        """
        enhances trained data using the given text and class
        """
        self.data.increaseClass(className)
        tokens = self.tokenizer.tokenize(text)
        for token in tokens:
            token = self.tokenizer.remove_stop_words(token)
            token = self.tokenizer.remove_punctuation(token)
            self.data.increaseToken(token, className)
That's the error in the console; does anyone know how to fix the problem? Thanks.
tokens = self.tokenizer.tokenize(text)
AttributeError: module 'naiveBayesClassifier.tokenizer' has no attribute 'tokenize'
That's the main script:
from naiveBayesClassifier import tokenizer
from naiveBayesClassifier.trainer import Trainer
from naiveBayesClassifier.classifier import Classifier
postTrainer = Trainer(tokenizer)
postsSet = [
{'text': 'not to eat too much is not enough to lose weight', 'category': 'health'},
{'text': 'Russia try to invade Ukraine', 'category': 'politics'},
{'text': 'do not neglect exercise', 'category': 'health'},
{'text': 'Syria is the main issue, Obama says', 'category': 'politics'}
]
for post in postsSet:
    postTrainer.train(post['text'], post['category'])
postClassifier = Classifier(postTrainer.data, tokenizer)
classification = postClassifier.classify("Obama is")
print(classification)
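The traceback says the tokenizer module itself has no tokenize attribute, which matches the main script: Trainer is handed the naiveBayesClassifier.tokenizer module rather than a tokenizer object. Assuming this is the muatik naive-bayes-classifier package, whose Trainer expects a Tokenizer instance, the fix would look something like this sketch (the Tokenizer constructor arguments follow that package's README and are an assumption here):

from naiveBayesClassifier import tokenizer

# assumption: pass a Tokenizer instance, not the module itself
postTrainer = Trainer(tokenizer.Tokenizer(stop_words=[], signs_to_remove=["?!#%&"]))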

Disable default behavior - ModelForm Select widget choices filled with referenced objects

I am using a form in Django that is based on a model. So it looks like this:
240 class EventDetailForm(NgFormValidationMixin, NgModelForm):
241     def __init__(self, *args, **kwargs):
242         super(EventDetailForm, self).__init__(*args, **kwargs)
243         self.fields['gallery'].queryset = Gallery.objects.none()
244
245     class Meta:
246         model = Event
247         fields = ('title', 'description', 'end_date', 'start_date', 'gallery', 'cover_photo')
248         widgets = {
249             'title': forms.TextInput(attrs={
250                 'editable-detail': '',
251             }),
252             'description': forms.TextInput(attrs={
253                 'class': 'panel-body',
254                 'id': 'event-description-editable',
255                 'editable-detail': '',
256             }),
257             'cover_photo': SelectWithDefaultOptions(attrs={
258                 'class': 'chosen-select-no-single',
259                 'id': 'select-cover-photo',
260                 'data-placeholder': 'Select Cover Photo',
261                 'style': 'width: 200px;',
262                 'tabindex': '-1',
263             }),
264             'start_date': DateTimeWidget(attrs={
265                 'class': 'datetimepicker col-xs-6',
266                 'id': 'event-start-date-editable',
267                 'editable-detail': '',
268             }),
269             'end_date': DateTimeWidget(attrs={
270                 'class': 'datetimepicker col-xs-6',
271                 'id': 'event-end-date-editable',
272                 'editable-detail': '',
273             }),
274             'gallery': SelectWithDefaultOptions(attrs={
275                 'class': 'chosen-select-no-single',
276                 'id': 'select-galley',
277                 'data-placeholder': 'Select Gallery',
278                 'style': 'width: 200px;',
279                 'gallery-select': '',
280                 'tabindex': '-1',
281                 'organisator-profile-specific': '',
282             }),
283         }
So what happens is that my gallery and cover_photo select widgets get filled with all the existing objects of those two types (because they are actually foreign keys to other models).
I want to prevent that, and as you can see on line 243 I have tried to empty the current queryset (I tried clearing the choices too, with the same result), which works pretty well. The problem is that, as you can see, I use my custom select widget, in which I set some default options. It looks like this:
class SelectWithDefaultOptions(forms.Select):
    def __init__(self, attrs=None, choices=()):
        super(SelectWithDefaultOptions, self).__init__(attrs, choices)

        choices = ('', 'empty') + choices
        choices = ('None', 'no selection') + choices
The problem is that with the approach I mentioned above I delete those default values too.
So I said to myself, "Well, I will get the needed values, erase everything, and put the preferred ones back." I tried it, but it turned out that the objects Django puts in actually replace the ones that have been set (it adds the default ones after the __init__ method has run).
So I thought, "Well, if I set choices=() in the initialisation of the widget (line 274), Django should not set any other values on top of that, because that would violate my choices." I tried it, but it turned out that Django does not care about which choices I would like to have and acts the same.
I also tried to set the field's 'initial' property, still with no results.
So, how do I prevent Django's default behaviour of putting the referenced objects into the choices list of my select?
Thanks.
This is how I fixed it, with a little help from @AamirAdnan:
class SelectWithDefaultOptions(forms.Select):
    def render(self, name, value, attrs=None, choices=()):
        choices = ()
        choices += (('empty', ''),)
        choices += (('no selection', 'None'),)
        self.choices = choices

        if value is None:
            value = ''

        final_attrs = self.build_attrs(attrs, name=name)
        output = [format_html('<select{0}>', flatatt(final_attrs))]
        options = self.render_options((), [value])

        if options:
            output.append(options)
        output.append('</select>')

        return mark_safe('\n'.join(output))
I just reworked the widget's render function.
It is not exactly what was asked for, but it works. What happens is:
1. Django sets the choices value to the referenced objects.
2. Before the widget is rendered, they are replaced with the correct choices.
Somehow the widget defined in the Meta is causing this issue; if you just move it to __init__ it will work. First, update your widget. The keyword choices in the line def __init__(self, attrs=None, choices=()) means that choices is empty by default, but any instance may override it by passing in some values. So you need to set it to an empty tuple explicitly:
class SelectWithDefaultOptions(forms.Select):
    def __init__(self, attrs=None, choices=()):
        super(SelectWithDefaultOptions, self).__init__(attrs, choices)
        choices = ()  # explicitly setting choices to empty here
        choices += (('', 'empty'),)
        choices += (('None', 'no selection'),)
        self.choices = choices
Now update your form to assign the widget to the gallery field in __init__ rather than in the Meta class:
class EventDetailForm(NgFormValidationMixin, NgModelForm):
    def __init__(self, *args, **kwargs):
        super(EventDetailForm, self).__init__(*args, **kwargs)
        self.fields['gallery'].widget = SelectWithDefaultOptions(attrs={
            'class': 'chosen-select-no-single',
            'id': 'select-galley',
            'data-placeholder': 'Select Gallery',
            'style': 'width: 200px;',
            'gallery-select': '',
            'tabindex': '-1',
            'organisator-profile-specific': '',
        })

    class Meta:
        model = Event
        fields = ('title', 'description', 'end_date', 'start_date', 'gallery', 'cover_photo')
Or, you don't need any custom widget at all. Just set the choices in the form's __init__ method and replace the SelectWithDefaultOptions widget with forms.Select in Meta (this is cleaner and simpler):
class EventDetailForm(NgFormValidationMixin, NgModelForm):
    def __init__(self, *args, **kwargs):
        super(EventDetailForm, self).__init__(*args, **kwargs)
        self.fields['gallery'].widget.choices = (('', 'Empty',),)

    class Meta:
        model = Event
        fields = ('title', 'description', 'end_date', 'start_date', 'gallery', 'cover_photo')
        widgets = {
            'gallery': forms.Select(attrs={
                'class': 'chosen-select-no-single',
                'id': 'select-galley',
                'data-placeholder': 'Select Gallery',
                'style': 'width: 200px;',
                'gallery-select': '',
                'tabindex': '-1',
                'organisator-profile-specific': '',
            }),
        }
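If the goal is also to keep the model-driven options out entirely, as attempted on line 243 of the question, the queryset can be emptied in the same __init__. A sketch combining both ideas from this thread (an untested assumption, not part of either answer as written):

class EventDetailForm(NgFormValidationMixin, NgModelForm):
    def __init__(self, *args, **kwargs):
        super(EventDetailForm, self).__init__(*args, **kwargs)
        self.fields['gallery'].queryset = Gallery.objects.none()  # offer no model objects
        self.fields['gallery'].widget.choices = (('', 'empty'), ('None', 'no selection'))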
