pytest automation apparently running tests during test collection phase - python

I'm running web UI automated tests with pytest and Selenium, and I'm hitting an issue where my tests appear to actually run during the collection phase. During this phase I would expect pytest to be collecting tests - not running them. The end result is that I get 6 test results where I would expect 2. Now the interesting piece: the 6 results only appear in the HTML report; on the command line I get only the expected 2 lines of output (but it takes roughly 300 seconds to run those two tests, because the tests are literally running multiple times).
tests/test_datadriven.py
#!/usr/bin/env python
from unittestzero import Assert
from pages.home import Home
from pages.administration import RolesTab
from api.api import ApiTasks
import time
import pytest
from data.datadrv import *
class TestRolesDataDriven(object):
    scenarios = [scenario1, scenario2]

    @pytest.mark.challenge
    def test_datadriven_rbac(self, mozwebqa, org, perm_name, resource, verbs, allowed, disallowed):
        """
        Perform a data-driven test related to role-based access controls.
        All parameters are fulfilled by the data.
        :param org: Organization name
        :param perm_name: Permission name
        :param resource: Resource
        :param verbs: A tuple of verbs
        :returns: Pass or Fail for the test
        """
        sysapi = ApiTasks(mozwebqa)
        home_page = Home(mozwebqa)
        rolestab = RolesTab(mozwebqa)
        role_name = "role_%s" % (home_page.random_string())
        perm_name = "perm_%s" % (home_page.random_string())
        username = "user%s" % home_page.random_string()
        email = username + "@example.com"
        password = "redhat%s" % (home_page.random_string())
        sysapi.create_org(org)
        sysapi.create_user(username, password, email)
        home_page.login()
        home_page.tabs.click_tab("administration_tab")
        home_page.tabs.click_tab("roles_administration")
        home_page.click_new()
        rolestab.create_new_role(role_name)
        rolestab.click_role_permissions()
        rolestab.role_org(org).click()
        rolestab.click_add_permission()
        rolestab.select_resource_type(resource)
        home_page.click_next()
        for v in verbs:
            home_page.select('verbs', v)
        home_page.click_next()
        rolestab.enter_permission_name(perm_name)
        rolestab.enter_permission_desc('Added by QE test.')
        rolestab.click_permission_done()
        rolestab.click_root_roles()
        rolestab.click_role_users()
        rolestab.role_user(username).add_user()
        home_page.header.click_logout()
        home_page.login(username, password)
        for t in allowed:
            Assert.true(t(home_page))
        for t in disallowed:
            Assert.false(t(home_page))
data/data.py
###
# DO NOT EDIT HERE
###
def pytest_generate_tests(metafunc):
    """
    Parse the data provided in scenarios.
    """
    idlist = []
    argvalues = []
    for scenario in metafunc.cls.scenarios:
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist)
###
# EDIT BELOW
# ADD NEW SCENARIOS
###
scenario1 = ('ACME_Manage_Keys', {'org': 'ACME_Corporation',
                                  'perm_name': 'ManageAcmeCorp',
                                  'resource': 'activation_keys',
                                  'verbs': ('manage_all',),
                                  'allowed': (Base.is_system_tab_visible,
                                              Base.is_new_key_visible,
                                              Base.is_activation_key_name_editable),
                                  'disallowed': (Base.is_dashboard_subs_visible,)})

scenario2 = ('Global_Read_Only', {'org': 'Global Permissions',
                                  'perm_name': 'ReadOnlyGlobal',
                                  'resource': 'organizations',
                                  'verbs': ('read', 'create'),
                                  'allowed': (Base.is_organizations_tab_visible,
                                              Base.is_new_organization_visible,
                                              Base.is_new_organization_name_field_editable),
                                  'disallowed': (Base.is_system_tab_visible,
                                                 Base.is_new_key_visible)})
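For reference, here is a minimal, self-contained version of the same scenarios pattern (adapted from the pytest docs); it is a useful sanity check for whether the parametrization by itself causes any test execution during collection:

def pytest_generate_tests(metafunc):
    # identical parametrization logic, but with trivial scenario data
    idlist = []
    argvalues = []
    for scenario in metafunc.cls.scenarios:
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist)

class TestSanity(object):
    scenarios = [('one', {'value': 1}), ('two', {'value': 2})]

    def test_value(self, value):
        assert value > 0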
Full source is available on GitHub: https://github.com/eanxgeek/katello_challenge
Does anyone have any idea what might be going on here? I am using the pytest-mozwebqa plugin, pytest, and Selenium.
Thanks!

Check the version of pytest-mozwebqa you have installed. If your installed version is < 0.10, then you must update:
pip-python install --upgrade pytest-mozwebqa
Due to the number of changes in pytest-mozwebqa, I strongly encourage you to test first in a Python virtualenv.
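For example, a disposable environment for that check might look like this (the environment name is arbitrary; plain pip works inside a virtualenv even if your system uses pip-python):

virtualenv mozwebqa-test
source mozwebqa-test/bin/activate
pip install --upgrade pytest-mozwebqa
pip freeze | grep mozwebqa    # confirm the installed version is >= 0.10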

Related

Pytest - run same tests for different sets of input data

I have a number of functions that I want to test using pytest.
Throughout my testing, I use several input files that I specify at the top of the script:
import pytest
from mymodule.mymodule import *
test_bam = 'bam/test/test_reads_pb.bwa.bam'
sample_mapping_file = 'tests/test_sample_mapping.txt'
pb_errors_file = 'tests/data/pb_test_out.json'
pb_stats = 'tests/data/pb_stats.json'
I am then running several tests using this input:
@pytest.fixture
def options():
    o, a = get_args()
    return o

@pytest.fixture
def samples_dict():
    d = get_sample_mapping(sample_mapping_file)
    return d

@pytest.fixture
def read_stats(options, samples_dict):
    stats, bam = clean_reads(options, test_bam, samples_dict)
    return stats

@pytest.fixture
def clean_bam(options, samples_dict):
    stats, bam = clean_reads(options, test_bam, samples_dict)
    return bam

def test_errors(options, samples_dict, clean_bam):
    """Test successful return from find_errors"""
    sample, genome, chrom = set_genome(options, test_bam, samples_dict)
    base_data = find_errors(options, chrom, sample, clean_bam)
    assert base_data
I would like to be able to run the same tests on multiple different sets of input, where test_bam, sample_mapping_file, pb_errors_file and pb_stats will all be different.
What's the best way of running the same tests on different sets of input data?
I've played around with using marks to run input-specific functions:
@pytest.mark.pd
def get_pb_data():
    """Read in all pb-related files"""

@pytest.mark.ab
def get_ab_data():
    """Read in all ab-related files"""
But this doesn't work with the fixtures that I have set up (unless I'm missing something).
Any advice would be great!
Use pytest's parametrize wrapper:
test_bam = 'bam/test/test_reads_pb.bwa.bam'
sample_mapping_file = 'tests/test_sample_mapping.txt'
pb_errors_file = 'tests/data/pb_test_out.json'
pb_stats = 'tests/data/pb_stats.json'
@pytest.mark.parametrize("config", [test_bam, sample_mapping_file, pb_errors_file, pb_stats])
def do_something(config):
    pass  # test body goes here; config receives one input per run
It will create a separate test for every input in the list and assign each value to the config variable.
@pytest.mark.pd doesn't specify an input type; it adds a pd marker to the test, which can be used when running the tests, for example to run all the tests marked with pd:
pytest TestsFolder -m pd
If you want to run the tests on different sets of files, you can store the file names in a CSV, for example, and read the sets from there in the test's parametrize marker:
def data_source():
    for files in read_files_groups_from_csv():
        yield files

@pytest.mark.parametrize('files', data_source())
def test_errors(options, samples_dict, clean_bam, files):
    """For example, the files parameter will be ['bam/test/test_reads_pb.bwa.bam', 'tests/test_sample_mapping.txt', 'tests/data/pb_test_out.json', 'tests/data/pb_stats.json']."""
mark.parametrize runs before the fixtures, so you can send files as a parameter to them as well:
@pytest.fixture
def samples_dict(files):
    # files[1] is the sample mapping file in this group
    return get_sample_mapping(files[1])
If you don't want to rely on an index, create a class with the file names as attributes and return it from data_source().
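A minimal sketch of that attribute-based approach (FileGroup and the CSV path are illustrative assumptions, not part of the question's code):

import csv
import pytest

class FileGroup(object):
    def __init__(self, bam, sample_mapping, errors, stats):
        self.bam = bam
        self.sample_mapping = sample_mapping
        self.errors = errors
        self.stats = stats

def data_source():
    # hypothetical CSV with one file group per row: bam, mapping, errors, stats
    with open('tests/file_groups.csv') as fh:
        for row in csv.reader(fh):
            yield FileGroup(*row)

@pytest.mark.parametrize('files', list(data_source()))
def test_paths_look_sane(files):
    assert files.bam.endswith('.bam')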

How to change one value in one place and use it in a couple of functions?

I'm writing test automation for an API in BDD behave. I need a switcher between environments. Is there any way to change one value in one place without adding this value to every function? Example:
I've tried adding the value to every function, but it makes the whole project very complicated.
headers = {
    'Content-Type': 'application/json',
    'country': 'fi'
}
What I want to switch is only the country value in headers, e.g. from 'fi' to 'es', and then all functions should switch themselves to the es environment, e.g.:
def sending_post_request(endpoint, user):
    url = fi_api_endpoints.api_endpoints_list.get(endpoint)
    personalId = {'personalId': user}
    json_post = requests.post(url,
                              headers=headers,
                              data=json.dumps(personalId))
    endpoint_message = json_post.text
    server_status = json_post.status_code

def phone_number(phone_number_status):
    if phone_number_status == 'wrong':
        cursor = functions_concerning_SQL_conection.choosen_db('fi_sql_identity')
        cursor.execute("SELECT TOP 1 PersonalId from Registrations where PhoneNumber is NULL")
        result = cursor.fetchone()
        user_with_no_phone_number = result[0]
        return user_with_no_phone_number
    else:
        cursor = functions_concerning_SQL_conection.choosen_db('fi_sql_identity')
        cursor.execute("SELECT TOP 1 PersonalId from Registrations where PhoneNumber is not NULL")
        result = cursor.fetchone()
        user_with_phone_number = result[0]
        return user_with_phone_number
And when I change from 'fi' to 'es' in headers, I want:
fi_sql_identity to change to es_sql_identity
url = fi_api_endpoints.api_endpoints_list.get(endpoint) to change to
url = es_api_endpoints.api_endpoints_list.get(endpoint)
Thanks, and please help.
With respect to your original question, a solution for this case is a closure:
def f(x):
    def long_calculation(y):
        return x * y
    return long_calculation

# create different functions without dispatching multiple times
g = f(val_1)
h = f(val_2)
g(val_3)
h(val_3)
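Applied to your region problem, a sketch might look like this (fi_api_endpoints/es_api_endpoints are the modules from your question; everything else is illustrative):

import json
import requests

def make_post_sender(region_endpoints, country):
    # headers are captured in the closure, so callers never touch them
    headers = {'Content-Type': 'application/json', 'country': country}

    def send(endpoint, user):
        url = region_endpoints.api_endpoints_list.get(endpoint)
        return requests.post(url, headers=headers, data=json.dumps({'personalId': user}))

    return send

send_fi = make_post_sender(fi_api_endpoints, 'fi')
send_es = make_post_sender(es_api_endpoints, 'es')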
Well, the problem is: why do you hardcode everything? With the update, you can simplify your function to:
def phone_number(phone_number_status, db_name='fi_sql_identity'):
    cursor = functions_concerning_SQL_conection.choosen_db(db_name)
    if phone_number_status == 'wrong':
        sql = "SELECT TOP 1 PersonalId from Registrations where PhoneNumber is NULL"
    else:
        sql = "SELECT TOP 1 PersonalId from Registrations where PhoneNumber is not NULL"
    cursor.execute(sql)
    result = cursor.fetchone()
    return result[0]
Also, please don't write code like this:
# WRONG
fi_db_conn.send_data()
But use a parameter:
region = 'fi' # or "es"
db_conn = initialize_conn(region)
db_conn.send_data()
And use a config file to store your endpoints with respect to your region, e.g. consider YAML:
# config.yml
es:
  db_name: es_sql_identity
fi:
  db_name: fi_sql_identity
Then use them in Python:
import yaml

with open('config.yml') as f:
    config = yaml.safe_load(f)

region = 'fi'
db_name = config[region]['db_name']  # "fi_sql_identity"
# status = ...
result = phone_number(status, db_name)
See the PyYAML documentation for more on working with YAML.
First, provide an encapsulation of how to access the resources of a region by giving this encapsulation a region parameter. It may also be a good idea to provide this functionality as a behave fixture.
CASE 1: region parameter needs to vary between features / scenarios
For example, this means that SCENARIO_1 needs region="fi" and SCENARIO_2 needs region="es".
Use a fixture and fixture-tag with a region parameter.
In this case you need to write your own scenarios for each region (BAD TEST REUSE), or use a ScenarioOutline as a template to let behave generate the tests for you (for example, by using a fixture-tag with a region parameter value).
CASE 2: region parameter is constant for all features / scenarios (during test-run)
You can support multiple test runs with different region parameters by using a userdata parameter.
Look at behave's userdata concept.
This allows you to run behave -D region=fi ... and behave -D region=es ...
This case provides better reuse of the test suite: a large part of it becomes a common suite that is applied to all regions.
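A minimal sketch of the userdata approach (context.config.userdata is behave's documented API; the derived names are assumptions based on your naming convention):

# environment.py
def before_all(context):
    region = context.config.userdata.get('region', 'fi')  # behave -D region=es overrides this
    context.db_name = '%s_sql_identity' % region           # assumed naming convention
    context.headers = {'Content-Type': 'application/json', 'country': region}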
HINT: Your code examples are too specific ("fi" based) which is a BAD-SMELL.

Add metadata to TestCase in Python's unittest

I'd like to add metadata to individual tests in a TestCase that I've written to use Python's unittest framework. The metadata (a string, really) needs to be carried through the testing process and output to an XML file.
Other than remaining with the test, the data isn't going to be used by unittest or by my test code. (I've got a program that will run afterwards, open the XML file, and go looking for the metadata/string.)
I've previously used NUnit, which allows one to use a C# attribute to do this. Specifically, you can put this above a class:
[Property("SmartArrayAOD", -3)]
and then later find that in the XML output.
Is it possible to attach metadata to a test in Python's unittest?
Simple way for just dumping XML
If all you want to do is write stuff to an XML file after every unit test, just add a tearDown method to your test class (e.g. if you have class MyTest(unittest.TestCase), give it a tearDown method):
class MyTest(unittest.TestCase):
    def tearDown(self):
        dump_xml_however_you_do()

    def test_whatever(self):
        pass
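For instance, a concrete sketch of that tearDown using only the standard library (the metadata dict and output filename are illustrative assumptions):

import unittest
import xml.etree.ElementTree as ET

class MyMetadataTest(unittest.TestCase):
    metadata = {'SmartArrayAOD': '-3'}  # hypothetical property, mirroring the NUnit example

    def tearDown(self):
        # Write one small XML file per test containing the attached metadata
        root = ET.Element('test', name=self.id())
        for key, value in self.metadata.items():
            ET.SubElement(root, 'property', name=key, value=value)
        ET.ElementTree(root).write(self.id() + '.xml')

    def test_whatever(self):
        pass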
General method
If you want a general way to collect and track metadata from all your tests and return it at the end, try creating an astropy table in your test class's __init__() and adding rows to it during tearDown(), then extracting a reference to your initialized instances of your test class from unittest, like this:
Step 1: set up a re-usable subclass of unittest.TestCase so we don't have to duplicate the table handling
(put all the example code in the same file or copy the imports)
"""
Demonstration of adding and retrieving meta data from python unittest tests
"""
import sys
import warnings
import unittest
import copy
import time
import astropy
import astropy.table
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class DemoTest(unittest.TestCase):
"""
Demonstrates setup of an astropy table in __init__, adding data to the table in tearDown
"""
def __init__(self, *args, **kwargs):
super(DemoTest, self).__init__(*args, **kwargs)
# Storing results in a list made it convenient to aggregate them later
self.results_tables = [astropy.table.Table(
names=('Name', 'Result', 'Time', 'Notes'),
dtype=('S50', 'S30', 'f8', 'S50'),
)]
self.results_tables[0]['Time'].unit = 'ms'
self.results_tables[0]['Time'].format = '0.3e'
self.test_timing_t0 = 0
self.test_timing_t1 = 0
def setUp(self):
self.test_timing_t0 = time.time()
def tearDown(self):
test_name = '.'.join(self.id().split('.')[-2:])
self.test_timing_t1 = time.time()
dt = self.test_timing_t1 - self.test_timing_t0
# Check for errors/failures in order to get state & description. https://stackoverflow.com/a/39606065/6605826
if hasattr(self, '_outcome'): # Python 3.4+
result = self.defaultTestResult() # these 2 methods have no side effects
self._feedErrorsToResult(result, self._outcome.errors)
problem = result.errors or result.failures
state = not problem
if result.errors:
exc_note = result.errors[0][1].split('\n')[-2]
elif result.failures:
exc_note = result.failures[0][1].split('\n')[-2]
else:
exc_note = ''
else: # Python 3.2 - 3.3 or 3.0 - 3.1 and 2.7
# result = getattr(self, '_outcomeForDoCleanups', self._resultForDoCleanups) # DOESN'T WORK RELIABLY
# This is probably only good for python 2.x, meaning python 3.0, 3.1, 3.2, 3.3 are not supported.
exc_type, exc_value, exc_traceback = sys.exc_info()
state = exc_type is None
exc_note = '' if exc_value is None else '{}: {}'.format(exc_type.__name__, exc_value)
# Add a row to the results table
self.results_tables[0].add_row()
self.results_tables[0][-1]['Time'] = dt*1000 # Convert to ms
self.results_tables[0][-1]['Result'] = 'pass' if state else 'FAIL'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=astropy.table.StringTruncateWarning)
self.results_tables[0][-1]['Name'] = test_name
self.results_tables[0][-1]['Notes'] = exc_note
Step 2: set up a test manager that extracts metadata
def manage_tests(tests):
    """
    Function for running tests and extracting meta data
    :param tests: list of classes sub-classed from DemoTest
    :return: (TextTestResult, Table, string)
        result returned by unittest
        astropy table
        string: formatted version of the table
    """
    table_sorting_columns = ['Result', 'Time']
    # Build test suite
    suite_list = []
    for test in tests:
        suite_list.append(unittest.TestLoader().loadTestsFromTestCase(test))
    combo_suite = unittest.TestSuite(suite_list)
    # Run tests
    results = [unittest.TextTestRunner(verbosity=1, stream=StringIO(), failfast=False).run(combo_suite)]
    # Catch test classes
    suite_tests = []
    for suite in suite_list:
        suite_tests += suite._tests
    # Collect results tables
    results_tables = []
    for suite_test in suite_tests:
        if getattr(suite_test, 'results_tables', [None])[0] is not None:
            results_tables += copy.copy(suite_test.results_tables)
    # Process tables, if any
    if len(results_tables):
        a = []
        while (len(a) == 0) and len(results_tables):
            a = results_tables.pop(0)  # Skip empty tables, if any
        results_table = a
        for rt in results_tables:
            if len(rt):
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=DeprecationWarning)
                    results_table = astropy.table.join(results_table, rt, join_type='outer')
        try:
            results_table = results_table.group_by(table_sorting_columns)
        except Exception:
            print('Error sorting test results table. Columns may not be in the preferred order.')
        column_names = list(results_table.columns.keys())
        alignments = ['<' if cn == 'Notes' else '>' for cn in column_names]
        if len(results_table):
            rtf = '\n'.join(results_table.pformat(align=alignments, max_width=-1))
            exp_res = sum([result.testsRun - len(result.skipped) for result in results])
            if len(results_table) != exp_res:
                print('ERROR forming results table. Expected {} results, but table length is {}.'.format(
                    exp_res, len(results_table),
                ))
        else:
            rtf = None
    else:
        results_table = rtf = None
    return results, results_table, rtf
Step 3: Example usage
class FunTest1(DemoTest):
    @staticmethod
    def test_pass_1():
        pass

    @staticmethod
    def test_fail_1():
        assert False, 'Meant to fail for demo 1'

class FunTest2(DemoTest):
    @staticmethod
    def test_pass_2():
        pass

    @staticmethod
    def test_fail_2():
        assert False, 'Meant to fail for demo 2'

res, tab, form = manage_tests([FunTest1, FunTest2])
print(form)
print('')
for r in res:
    print(r)
    for error in r.errors:
        print(error[0])
        print(error[1])
Sample results:
$ python unittest_metadata.py
        Name         Result   Time                      Notes
                               ms
-------------------- ------ --------- ----------------------------------------
FunTest2.test_fail_2   FAIL 5.412e-02 AssertionError: Meant to fail for demo 2
FunTest1.test_fail_1   FAIL 1.118e-01 AssertionError: Meant to fail for demo 1
FunTest2.test_pass_2   pass 6.199e-03
FunTest1.test_pass_1   pass 6.914e-03
<unittest.runner.TextTestResult run=4 errors=0 failures=2>
Should work with python 2.7 or 3.7. You can add whatever columns you want to the table. You can add parameters and stuff to the table in setUp, tearDown, or even during the tests.
Warnings:
This solution accesses a protected attribute _tests of unittest.suite.TestSuite, which can have unexpected results. This specific implementation works as expected for me in python2.7 and python3.7, but slight variations on how the suite is built and interrogated can easily lead to strange things happening. I couldn't figure out a different way to extract references to the instances of my classes that unittest uses, though.
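One hedged alternative that avoids the protected attribute: TestSuite objects are documented to be iterable, so you can walk the suite recursively and collect the TestCase instances yourself. A sketch:

def iter_tests(suite):
    """Recursively yield the TestCase instances held by a TestSuite."""
    for item in suite:
        if isinstance(item, unittest.TestSuite):
            for t in iter_tests(item):
                yield t
        else:
            yield item

# e.g. suite_tests = list(iter_tests(combo_suite))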

Arcpy.python ArcServer sde permissions

I have a Python script that selects a point by its ID, then selects all points within a distance and returns only the subset of those that match a type field, i.e. find all hospitals within 3 miles of this location.
My script works and does what it's supposed to, so I create a GP service from it, add the service back into the map, and run it. Now I notice that no matter what distance I use, the data doesn't change. If I delete the created feature class to see if it's really working, it does not create a new feature class, but it still says it completed successfully.
Now the weird part: if I hit the GP service at the REST endpoint with the parameters, it says it works but returns no records. I've been careful to avoid schema locks when using the REST endpoint; ArcMap and ArcCatalog are closed.
It's like the GP service doesn't have permission to write to the SDE database, even though my SDE connection works fine on my PC.
Any ideas?
import arcpy, os, string
from arcpy import env

db = r"Connection to racdev1.sde"
theworkspace = r"Connection to racdev1.sde"
arcpy.env.workspace = theworkspace
arcpy.env.overwriteOutput = True

# facilityID = '1249'
facilityID = arcpy.GetParameterAsText(0)
# facilityIDType = 'PFI'
facilityIDType = arcpy.GetParameterAsText(1)
thedistance = arcpy.GetParameterAsText(2)
# thedistance = '3 miles'
# withindistance = "3 Miles"
withindistance = thedistance + ' Miles'
sql_query = "\"%s\" = '%s'" % ("ID", facilityID)
sql_query2 = "\"%s\" = '%s'" % ("IDTYPE", facilityIDType)

# Local variables:
Facilities = "DOHGIS.NYSDOH_CI_DATA"
featLayer = "thePts"
arcpy.MakeFeatureLayer_management(Facilities, featLayer)

# Process: Select Layer By Attribute
arcpy.SelectLayerByAttribute_management(featLayer, "NEW_SELECTION", sql_query)

# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(featLayer, "WITHIN_A_DISTANCE", featLayer, withindistance, "NEW_SELECTION")

# Now for the subset
arcpy.SelectLayerByAttribute_management("thePts", "SUBSET_SELECTION", sql_query2)

# Create the new feature class
arcpy.CopyFeatures_management("thePts", 'DOHGIS.NYSDOH_FacilitiesQuery')
# print "Done"

Python string comparison using pymarc marc8_to_unicode no longer working

My code imports a MARC file using MARCReader and compares a string against a list of acceptable answers. If the string from MARC has no match in my list, it gets added to an error list. This has worked for years in Python 2.7.4 installations on Windows 7 with no issue. I recently got a Windows 10 machine and installed Python 2.7.10, and now strings with non-standard characters fail that match. The issue is not Python 2.7.10 alone; I've installed every version from 2.7.4 through 2.7.10 on this new machine and get the same problem. A new install of Python 2.7.10 on a Windows 7 machine also has the problem.
I've trimmed out functions that aren't relevant, and I've dramatically trimmed the master list. In this example, "Académie des Sciences" is an existing repository, but "Acadm̌ie des Sciences" now appears in our list of new repositories.
# -*- coding: utf-8 -*-
from aipmarc import get_catdb, get_bibno, parse_date
from phfawstemplate import browsepage  # , nutchpage, eadpage, titlespage
from pymarc import MARCReader, marc8_to_unicode
from time import strftime
from umlautsort import alafiling
import urllib2
import sys
import os
import string

def make_newrepos_list(list, fn):  # Create list of unexpected repositories found in the MArcout database dump
    output = "These new repositories are not yet included in the master list in phfaws.py. Please add the repository code (in place of ""NEWCODE*""), and the URL (in place of ""TEST""), and then add these lines to phfaws.py. Please keep the list alphabetical. \nYou can find repository codes at http://www.loc.gov/marc/organizations/ \n \n"
    for row in list:
        output = '%s reposmasterlist.append([u"%s", "%s", "%s"])\n' % (output, row[0], row[1], row[2])
    fh = open(fn, 'w')
    fh.write(output.encode("utf-8"))
    fh.close()

def main(marcfile):
    reader = MARCReader(file(marcfile))
    '''
    Creating list of preset repository codes.
    '''
    reposmasterlist = [[u"American Institute of Physics", "MdCpAIP", "http://www.aip.org/history/nbl/index.html"]]
    reposmasterlist.append([u"Académie des Sciences", "FrACADEMIE", "http://www.academie-sciences.fr/fr/Transmettre-les-connaissances/inventaires-des-fonds-d-archives-personnelles.html"])
    reposmasterlist.append([u"American Association for the Advancement of Science", "daaas", "http://archives.aaas.org/"])
    newreposcounter = 0
    newrepos = ""
    newreposlist = []
    findingaidcounter = 0
    reposcounter = 0
    for record in reader:
        if record['903']:  # Get only records where 903a="PHFAWS"
            phfawsfull = record.get_fields('903')
            for field in phfawsfull:
                phfawsnote = field['a']
                if 'PHFAWS' in phfawsnote:
                    if record['852'] is not None:  # Get only records where 852/repository is not blank
                        repository = record.get_fields('852')
                        for field in repository:
                            reposname = field['a']
                            reposname = marc8_to_unicode(reposname)  # Convert repository name from MARC file to Unicode
                            reposname = reposname.rstrip('.,')
                            reposcode = None
                            reposurl = None
                            for row in reposmasterlist:  # Match field 852 repository against the master list.
                                if row[0] == reposname:  # If it's in the master list, use the master list to populate our repository-related fields
                                    reposcode = row[1]
                                    reposurl = row[2]
                            if record['856'] is not None:  # Get only records where 856 is not blank and includes "online finding aid"
                                links = record.get_fields('856')
                                for field in links:
                                    linksthree = field['3']
                                    if linksthree is not None and "online finding aid" in linksthree:
                                        if reposcode is None:  # If this record's repository wasn't in the master list, add to list of new repositories
                                            newreposcounter += 1
                                            newrepos = '%s %s \n' % (newrepos, reposname)
                                            reposcode = "NEWCODE" + str(newreposcounter)
                                            reposurl = "TEST"
                                            reposmasterlist.append([reposname, reposcode, reposurl])
                                            newreposlist.append([reposname, reposcode, reposurl])
                                        human_url = field['u']
                                    else:
                                        pass
                            else:
                                pass
                    else:
                        pass
                else:
                    pass
        else:
            pass
    # Output list of new repositories
    newreposlist.sort(key=lambda rep: rep[0])
    if newreposcounter != 0:
        status = '%d new repositories found. You must add information on these repositories, then run phfaws.py again. Please see the newly updated newrepos.txt for details.' % (newreposcounter)
        sys.stderr.write(status)
        make_newrepos_list(newreposlist, 'newrepos.txt')

if __name__ == '__main__':
    try:
        mf = sys.argv[1]
        sys.exit(main(mf))
    except IndexError:
        sys.exit('Usage: %s <marcfile>' % sys.argv[0])
Edit: I've found that simply commenting out the "reposname = marc8_to_unicode(reposname)" line gets me the results I want. I still don't understand why this is, since it was a necessary step before.
This suggests to me that the encoding of strings in your database changed from MARC8 to Unicode. Have you upgraded your cataloging system recently?
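If so, one hedged fix is to let pymarc decide per record instead of calling marc8_to_unicode unconditionally: MARCReader accepts a to_unicode flag, and the record leader (position 9: blank for MARC-8, 'a' for UCS/Unicode) tells it which decoder to apply. A sketch:

from pymarc import MARCReader

reader = MARCReader(file(marcfile), to_unicode=True)  # decodes based on each record's leader
for record in reader:
    reposname = record['852']['a']  # already unicode; no marc8_to_unicode call needed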
