Structure of database table mytable:

    id       INT
    id_card  VARCHAR(32)

where id is the auto-increment primary key.

How can I gracefully insert an id_card only when it does not already exist? It must of course be thread-safe, with no duplicate rows.
My implementation adds a global lock at the code level:
# global lock, used like threading.Lock but across processes (flock-based)
import os

if os.name != "nt":
    import fcntl


class GlobalLock(object):
    def __init__(self, lock_file):
        self.lock_file = "/tmp/" + lock_file

    def acquire(self):
        if os.name != "nt":
            self.fn = open(self.lock_file, "w")
            fcntl.flock(self.fn.fileno(), fcntl.LOCK_EX)

    def release(self):
        if os.name != "nt":
            # closing the file descriptor releases the flock
            self.fn.close()


__gl__ = GlobalLock("__xmx__")


def global_lock(fp):
    def _d(*args, **kw):
        __gl__.acquire()
        try:
            return fp(*args, **kw)
        finally:
            __gl__.release()
    return _d
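Note that acquire() and release() are both no-ops when os.name == "nt", so on Windows the decorator does not actually serialize anything. If single-process thread safety is enough as a fallback there, a plain threading.Lock could be combined with the flock; the PortableLock class below is only my own illustrative sketch, not part of the original code:

import os
import threading

if os.name != "nt":
    import fcntl


class PortableLock(object):
    # Sketch only: flock() across processes on POSIX,
    # plus a threading.Lock so Windows still gets in-process locking.
    def __init__(self, lock_file):
        self.lock_file = "/tmp/" + lock_file
        self._tlock = threading.Lock()

    def acquire(self):
        self._tlock.acquire()
        if os.name != "nt":
            self.fn = open(self.lock_file, "w")
            fcntl.flock(self.fn.fileno(), fcntl.LOCK_EX)

    def release(self):
        if os.name != "nt":
            self.fn.close()  # closing the fd drops the flock
        self._tlock.release()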
Database operation code:
import logging

from sqlalchemy import func
from models.models import MyTable  # model; db is the application's SQLAlchemy handle, defined elsewhere


def is_id_card_exists(id_card):
    try:
        # query through a scoped session, case-insensitive match on id_card
        s = db.create_scoped_session()
        result = s.query(MyTable).filter(func.upper(MyTable.id_card) == func.upper(id_card)).first()
        s.remove()
        return result is not None
    except Exception as e:
        logging.error("_app.dbhelper.py is_id_card_exists exception: " + str(e))
        # on error, report "exists" so the caller does not insert a duplicate
        return True


# insert into mytable only if id_card is not present yet
@global_lock
def add_record(id_card):
    try:
        # check-then-insert; the surrounding @global_lock serializes this section
        if is_id_card_exists(id_card):
            return None
        s = MyTable(id_card=id_card)
        db.session.add(s)
        db.session.commit()
        return s
    except Exception as e:
        logging.error("_app.dbhelper.py add_record exception: " + str(e))
        return None
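As a quick sanity check of the locked check-then-insert, something like the following could fire concurrent inserts of the same id_card and confirm that only one row ends up in mytable. This is a hypothetical test snippet: the import path _app.dbhelper is taken from the log messages above, and the hammer function and sample id_card value are my own.

import threading

from _app.dbhelper import add_record  # assumed module path

def hammer(id_card, results):
    results.append(add_record(id_card))

results = []
threads = [threading.Thread(target=hammer, args=("110101199001011234", results))
           for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# expect exactly one MyTable instance and nine None values
inserted = [r for r in results if r is not None]
print("rows inserted:", len(inserted))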
I have also tried locking the table directly in MySQL, but I never solved the db.session caching problem that came with it. Is there a more elegant solution? Of course I could make the id_card field the primary key, but I feel that bypasses the problem rather than solving it.
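For comparison, the database-level idea mentioned above does not have to make id_card the primary key: a separate unique index on id_card plus catching the duplicate-key error keeps id as the surrogate key and closes the race without any application lock. This is only a rough sketch; it assumes MyTable declares id_card with unique=True (or that a UNIQUE index exists on mytable.id_card), reuses db and MyTable from above, and the function name add_record_unique is my own.

from sqlalchemy.exc import IntegrityError

def add_record_unique(id_card):
    record = MyTable(id_card=id_card)
    db.session.add(record)
    try:
        db.session.commit()
        return record
    except IntegrityError:
        # another thread or process inserted the same id_card first
        db.session.rollback()
        return None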