- # mssql/base.py
- # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
- # <see AUTHORS file>
- #
- # This module is part of SQLAlchemy and is released under
- # the MIT License: http://www.opensource.org/licenses/mit-license.php
- """
- .. dialect:: mssql
- :name: Microsoft SQL Server
- Auto Increment Behavior
- -----------------------
- SQL Server provides so-called "auto incrementing" behavior using the
- ``IDENTITY`` construct, which can be placed on an integer primary key.
- SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
- described at :paramref:`.Column.autoincrement`; this means
- that by default, the first integer primary key column in a :class:`.Table`
- will be considered to be the identity column and will generate DDL as such::
- from sqlalchemy import Table, MetaData, Column, Integer
- m = MetaData()
- t = Table('t', m,
- Column('id', Integer, primary_key=True),
- Column('x', Integer))
- m.create_all(engine)
- The above example will generate DDL as:
- .. sourcecode:: sql
- CREATE TABLE t (
- id INTEGER NOT NULL IDENTITY(1,1),
- x INTEGER NULL,
- PRIMARY KEY (id)
- )
- For the case where this default generation of ``IDENTITY`` is not desired,
- specify ``autoincrement=False`` on all integer primary key columns::
- m = MetaData()
- t = Table('t', m,
- Column('id', Integer, primary_key=True, autoincrement=False),
- Column('x', Integer))
- m.create_all(engine)
- .. note::
- An INSERT statement that refers to an explicit value for such
- a column is prohibited by SQL Server; however, SQLAlchemy will detect this
- and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
- time. As this is not a high-performing process, care should be taken to
- set the ``autoincrement`` flag appropriately for columns that will not
- actually require IDENTITY behavior.
- Controlling "Start" and "Increment"
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Specific control over the parameters of the ``IDENTITY`` value is supported
- using the :class:`.schema.Sequence` object. While this object normally
- represents an explicit "sequence" for supporting backends, on SQL Server it is
- re-purposed to specify behavior regarding the identity column, including
- support of the "start" and "increment" values::
- from sqlalchemy import Table, Integer, Sequence, Column
- Table('test', metadata,
- Column('id', Integer,
- Sequence('blah', start=100, increment=10),
- primary_key=True),
- Column('name', String(20))
- ).create(some_engine)
- would yield:
- .. sourcecode:: sql
- CREATE TABLE test (
- id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
- name VARCHAR(20) NULL
- )
- Note that the ``start`` and ``increment`` values for sequences are
- optional and will default to 1,1.
- INSERT behavior
- ^^^^^^^^^^^^^^^^
- Handling of the ``IDENTITY`` column at INSERT time involves two key
- techniques. The most common is being able to fetch the "last inserted value"
- for a given ``IDENTITY`` column, a process which SQLAlchemy performs
- implicitly in many cases, most importantly within the ORM.
- The process for fetching this value has several variants:
- * In the vast majority of cases, RETURNING is used in conjunction with INSERT
- statements on SQL Server in order to get newly generated primary key values:
- .. sourcecode:: sql
- INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
- * When RETURNING is not available or has been disabled via
- ``implicit_returning=False``, either the ``scope_identity()`` function or
- the ``@@identity`` variable is used; behavior varies by backend:
- * when using PyODBC, the phrase ``; select scope_identity()`` will be
- appended to the end of the INSERT statement; a second result set will be
- fetched in order to receive the value. Given a table as::
- t = Table('t', m, Column('id', Integer, primary_key=True),
- Column('x', Integer),
- implicit_returning=False)
- an INSERT will look like:
- .. sourcecode:: sql
- INSERT INTO t (x) VALUES (?); select scope_identity()
- * Other dialects such as pymssql will call upon
- ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
- statement. If the flag ``use_scope_identity=False`` is passed to
- :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
- is used instead.
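- For example, to select the ``@@identity`` approach explicitly (a sketch;
- the connection URL is a placeholder)::
- engine = create_engine(
- "mssql+pymssql://scott:tiger@hostname/dbname",
- use_scope_identity=False
- )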
- A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
- that refers to the identity column explicitly. The SQLAlchemy dialect will
- detect when an INSERT construct, created using a core :func:`.insert`
- construct (not a plain string SQL), refers to the identity column, and
- in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
- statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
- execution. Given this example::
- m = MetaData()
- t = Table('t', m, Column('id', Integer, primary_key=True),
- Column('x', Integer))
- m.create_all(engine)
- engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
- The above column will be created with IDENTITY; however, the INSERT statement
- we emit is specifying explicit values. In the echo output we can see
- how SQLAlchemy handles this:
- .. sourcecode:: sql
- CREATE TABLE t (
- id INTEGER NOT NULL IDENTITY(1,1),
- x INTEGER NULL,
- PRIMARY KEY (id)
- )
- COMMIT
- SET IDENTITY_INSERT t ON
- INSERT INTO t (id, x) VALUES (?, ?)
- ((1, 1), (2, 2))
- SET IDENTITY_INSERT t OFF
- COMMIT
- This
- is an auxiliary use case suitable for testing and bulk insert scenarios.
- MAX on VARCHAR / NVARCHAR
- -------------------------
- SQL Server supports the special string "MAX" within the
- :class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes,
- to indicate "maximum length possible". The dialect currently handles this as
- a length of "None" in the base type, rather than supplying a
- dialect-specific version of these types, so that a base type
- specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
- more than one backend without using dialect-specific types.
- To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
- my_table = Table(
- 'my_table', metadata,
- Column('my_data', VARCHAR(None)),
- Column('my_n_data', NVARCHAR(None))
- )
- Collation Support
- -----------------
- Character collations are supported by the base string types,
- specified by the string argument "collation"::
- from sqlalchemy import VARCHAR
- Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
- When such a column is associated with a :class:`.Table`, the
- CREATE TABLE statement for this column will yield::
- login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
- .. versionadded:: 0.8 Character collations are now part of the base string
- types.
- LIMIT/OFFSET Support
- --------------------
- MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
- supported directly through the ``TOP`` Transact-SQL keyword::
- select.limit(n)
- will yield::
- SELECT TOP n
- If using SQL Server 2005 or above, LIMIT with OFFSET
- support is available through the ``ROW_NUMBER() OVER`` construct.
- For versions below 2005, LIMIT with OFFSET usage will fail.
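- Because the ``ROW_NUMBER() OVER`` approach requires an ordering, a SELECT
- using OFFSET must include an ORDER BY, or a compile error is raised.
- A minimal sketch, assuming a :class:`.Table` named ``t`` with an ``id``
- column::
- stmt = select([t]).order_by(t.c.id).limit(10).offset(20)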
- .. _mssql_isolation_level:
- Transaction Isolation Level
- ---------------------------
- All SQL Server dialects support setting of transaction isolation level
- both via a dialect-specific parameter
- :paramref:`.create_engine.isolation_level`
- accepted by :func:`.create_engine`,
- as well as the :paramref:`.Connection.execution_options.isolation_level`
- argument as passed to
- :meth:`.Connection.execution_options`. This feature works by issuing the
- command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
- each new connection.
- To set isolation level using :func:`.create_engine`::
- engine = create_engine(
- "mssql+pyodbc://scott:tiger@ms_2008",
- isolation_level="REPEATABLE READ"
- )
- To set using per-connection execution options::
- connection = engine.connect()
- connection = connection.execution_options(
- isolation_level="READ COMMITTED"
- )
- Valid values for ``isolation_level`` include:
- * ``READ COMMITTED``
- * ``READ UNCOMMITTED``
- * ``REPEATABLE READ``
- * ``SERIALIZABLE``
- * ``SNAPSHOT`` - specific to SQL Server
- .. versionadded:: 1.1 support for isolation level setting on Microsoft
- SQL Server.
- Nullability
- -----------
- MSSQL has support for three levels of column nullability. The default
- nullability allows nulls and is explicit in the CREATE TABLE
- construct::
- name VARCHAR(20) NULL
- If ``nullable=None`` is specified then no specification is made. In
- other words the database's configured default is used. This will
- render::
- name VARCHAR(20)
- If ``nullable`` is ``True`` or ``False`` then the column will be
- ``NULL`` or ``NOT NULL`` respectively.
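- A sketch illustrating all three levels on hypothetical columns::
- Column('a', String(20), nullable=True) # renders NULL
- Column('b', String(20), nullable=False) # renders NOT NULL
- Column('c', String(20), nullable=None) # no specification; server default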
- Date / Time Handling
- --------------------
- DATE and TIME are supported. Bind parameters are converted
- to datetime.datetime() objects as required by most MSSQL drivers,
- and results are processed from strings if needed.
- The DATE and TIME types are not available for MSSQL 2005 and
- previous - if a server version below 2008 is detected, DDL
- for these types will be issued as DATETIME.
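- For example, a ``datetime.time`` value bound to a :class:`.Time` column is
- combined with a base date into a ``datetime.datetime`` before being passed
- to the driver; a sketch, assuming a table ``t`` with a TIME column
- ``start``::
- conn.execute(t.insert(), {'start': datetime.time(12, 30)})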
- .. _mssql_large_type_deprecation:
- Large Text/Binary Type Deprecation
- ----------------------------------
- Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
- the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
- in a future release. SQLAlchemy normally relates these types to the
- :class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
- In order to accommodate this change, a new flag ``deprecate_large_types``
- is added to the dialect, which will be automatically set based on detection
- of the server version in use, if not otherwise set by the user. The
- behavior of this flag is as follows:
- * When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
- :class:`.LargeBinary` datatypes, when used to render DDL, will render the
- types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
- respectively. This is a new behavior as of the addition of this flag.
- * When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
- :class:`.LargeBinary` datatypes, when used to render DDL, will render the
- types ``NTEXT``, ``TEXT``, and ``IMAGE``,
- respectively. This is the long-standing behavior of these types.
- * The flag begins with the value ``None``, before a database connection is
- established. If the dialect is used to render DDL without the flag being
- set, it is interpreted the same as ``False``.
- * On first connection, the dialect detects if SQL Server version 2012 or greater
- is in use; if the flag is still at ``None``, it sets it to ``True`` or
- ``False`` based on whether 2012 or greater is detected.
- * The flag can be set to either ``True`` or ``False`` when the dialect
- is created, typically via :func:`.create_engine`::
- eng = create_engine("mssql+pymssql://user:pass@host/db",
- deprecate_large_types=True)
- * Complete control over whether the "old" or "new" types are rendered is
- available in all SQLAlchemy versions by using the UPPERCASE type objects
- instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
- :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
- fixed and always output exactly that type.
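- For example, to render ``NTEXT`` unconditionally regardless of server
- version (a sketch)::
- from sqlalchemy.dialects.mssql import NTEXT
- Column('notes', NTEXT)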
- .. versionadded:: 1.0.0
- .. _legacy_schema_rendering:
- Legacy Schema Mode
- ------------------
- Very old versions of the MSSQL dialect introduced the behavior such that a
- schema-qualified table would be auto-aliased when used in a
- SELECT statement; given a table::
- account_table = Table(
- 'account', metadata,
- Column('id', Integer, primary_key=True),
- Column('info', String(100)),
- schema="customer_schema"
- )
- this legacy mode of rendering would assume that "customer_schema.account"
- would not be accepted by all parts of the SQL statement, as illustrated
- below::
- >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
- >>> print(account_table.select().compile(eng))
- SELECT account_1.id, account_1.info
- FROM customer_schema.account AS account_1
- This mode of behavior is now off by default, as it appears to have served
- no purpose; however, in the case that legacy applications rely upon it,
- it is available using the ``legacy_schema_aliasing`` argument to
- :func:`.create_engine` as illustrated above.
- .. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
- in version 1.0.5 to allow disabling of legacy mode for schemas now
- defaults to False.
- .. _mssql_indexes:
- Clustered Index Support
- -----------------------
- The MSSQL dialect supports clustered indexes (and primary keys) via the
- ``mssql_clustered`` option. This option is available to :class:`.Index`,
- :class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
- To generate a clustered index::
- Index("my_index", table.c.x, mssql_clustered=True)
- which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
- To generate a clustered primary key use::
- Table('my_table', metadata,
- Column('x', ...),
- Column('y', ...),
- PrimaryKeyConstraint("x", "y", mssql_clustered=True))
- which will render the table, for example, as::
- CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
- PRIMARY KEY CLUSTERED (x, y))
- Similarly, we can generate a clustered unique constraint using::
- Table('my_table', metadata,
- Column('x', ...),
- Column('y', ...),
- PrimaryKeyConstraint("x"),
- UniqueConstraint("y", mssql_clustered=True),
- )
- To explicitly request a non-clustered primary key (for example, when
- a separate clustered index is desired), use::
- Table('my_table', metadata,
- Column('x', ...),
- Column('y', ...),
- PrimaryKeyConstraint("x", "y", mssql_clustered=False))
- which will render the table, for example, as::
- CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
- PRIMARY KEY NONCLUSTERED (x, y))
- .. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
- to None, rather than False. ``mssql_clustered=False`` now explicitly
- renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
- clause entirely, allowing SQL Server defaults to take effect.
- MSSQL-Specific Index Options
- -----------------------------
- In addition to clustering, the MSSQL dialect supports other special options
- for :class:`.Index`.
- INCLUDE
- ^^^^^^^
- The ``mssql_include`` option renders INCLUDE(colname) for the given string
- names::
- Index("my_index", table.c.x, mssql_include=['y'])
- would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
- .. versionadded:: 0.8
- Index ordering
- ^^^^^^^^^^^^^^
- Index ordering is available via functional expressions, such as::
- Index("my_index", table.c.x.desc())
- would render the index as ``CREATE INDEX my_index ON table (x DESC)``
- .. versionadded:: 0.8
- .. seealso::
- :ref:`schema_indexes_functional`
- Compatibility Levels
- --------------------
- MSSQL supports the notion of setting compatibility levels at the
- database level. This allows, for instance, running a database that
- is compatible with SQL2000 while running on a SQL2005 database
- server. ``server_version_info`` will always return the database
- server version information (in this case SQL2005) and not the
- compatibility level information. Because of this, if running under
- a backwards-compatibility mode, SQLAlchemy may attempt to use T-SQL
- statements that cannot be parsed by the database server.
- Triggers
- --------
- SQLAlchemy by default uses OUTPUT INSERTED to get at newly
- generated primary key values via IDENTITY columns or other
- server side defaults. MS-SQL does not
- allow the usage of OUTPUT INSERTED on tables that have triggers.
- To disable the usage of OUTPUT INSERTED on a per-table basis,
- specify ``implicit_returning=False`` for each :class:`.Table`
- which has triggers::
- Table('mytable', metadata,
- Column('id', Integer, primary_key=True),
- # ...,
- implicit_returning=False
- )
- Declarative form::
- class MyClass(Base):
- # ...
- __table_args__ = {'implicit_returning':False}
- This option can also be specified engine-wide using the
- ``implicit_returning=False`` argument on :func:`.create_engine`.
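- A sketch of the engine-wide form, with a placeholder connection URL::
- engine = create_engine(
- "mssql+pyodbc://scott:tiger@mydsn",
- implicit_returning=False
- )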
- .. _mssql_rowcount_versioning:
- Rowcount Support / ORM Versioning
- ---------------------------------
- The SQL Server drivers have very limited ability to return the number
- of rows updated from an UPDATE or DELETE statement. In particular, the
- pymssql driver has no support, whereas the pyodbc driver can only return
- this value under certain conditions.
- Specifically, updated rowcount is not available when OUTPUT INSERTED
- is used. This impacts the SQLAlchemy ORM's versioning feature when
- server-side versioning schemes are used. When
- using pyodbc, the "implicit_returning" flag needs to be set to false
- for any ORM mapped class that uses a version_id column in conjunction with
- a server-side version generator::
- class MyTable(Base):
- __tablename__ = 'mytable'
- id = Column(Integer, primary_key=True)
- stuff = Column(String(10))
- timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
- __mapper_args__ = {
- 'version_id_col': timestamp,
- 'version_id_generator': False,
- }
- __table_args__ = {
- 'implicit_returning': False
- }
- Without the implicit_returning flag above, the UPDATE statement will
- use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
- -1, causing the versioning logic to fail.
- Enabling Snapshot Isolation
- ---------------------------
- Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
- isolation mode that acquires blocking locks, causing even mildly concurrent
- applications to experience long-held locks and frequent deadlocks.
- Enabling snapshot isolation for the database as a whole is recommended
- for modern levels of concurrency support. This is accomplished via the
- following ALTER DATABASE commands executed at the SQL prompt::
- ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
- ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
- Background on SQL Server snapshot isolation is available at
- http://msdn.microsoft.com/en-us/library/ms175095.aspx.
- Known Issues
- ------------
- * No support for more than one ``IDENTITY`` column per table
- * Reflection of indexes does not work with versions older than
- SQL Server 2005
- """
- import datetime
- import operator
- import re
- from ... import sql, schema as sa_schema, exc, util
- from ...sql import compiler, expression, util as sql_util
- from ... import engine
- from ...engine import reflection, default
- from ... import types as sqltypes
- from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
- FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
- TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
- from ...util import update_wrapper
- from . import information_schema as ischema
- # http://sqlserverbuilds.blogspot.com/
- MS_2016_VERSION = (13,)
- MS_2014_VERSION = (12,)
- MS_2012_VERSION = (11,)
- MS_2008_VERSION = (10,)
- MS_2005_VERSION = (9,)
- MS_2000_VERSION = (8,)
- RESERVED_WORDS = set(
- ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
- 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
- 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
- 'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
- 'containstable', 'continue', 'convert', 'create', 'cross', 'current',
- 'current_date', 'current_time', 'current_timestamp', 'current_user',
- 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
- 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
- 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
- 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
- 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
- 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
- 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
- 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
- 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
- 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
- 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
- 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
- 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
- 'reconfigure', 'references', 'replication', 'restore', 'restrict',
- 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
- 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
- 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
- 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
- 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
- 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
- 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
- 'writetext',
- ])
- class REAL(sqltypes.REAL):
- __visit_name__ = 'REAL'
- def __init__(self, **kw):
- # REAL is a synonym for FLOAT(24) on SQL server
- kw['precision'] = 24
- super(REAL, self).__init__(**kw)
- class TINYINT(sqltypes.Integer):
- __visit_name__ = 'TINYINT'
- # MSSQL DATE/TIME types have varied behavior, sometimes returning
- # strings. MSDate/TIME check for everything, and always
- # filter bind parameters into datetime objects (required by pyodbc,
- # not sure about other dialects).
- class _MSDate(sqltypes.Date):
- def bind_processor(self, dialect):
- def process(value):
- if type(value) == datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- else:
- return value
- return process
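- # matches date result strings of the form YYYY-MM-DD returned by some drivers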
- _reg = re.compile(r"(\d+)-(\d+)-(\d+)")
- def result_processor(self, dialect, coltype):
- def process(value):
- if isinstance(value, datetime.datetime):
- return value.date()
- elif isinstance(value, util.string_types):
- m = self._reg.match(value)
- if not m:
- raise ValueError(
- "could not parse %r as a date value" % (value, ))
- return datetime.date(*[
- int(x or 0)
- for x in m.groups()
- ])
- else:
- return value
- return process
- class TIME(sqltypes.TIME):
- def __init__(self, precision=None, **kwargs):
- self.precision = precision
- super(TIME, self).__init__()
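- # base date combined with incoming time values, as most drivers require
- # a full datetime for TIME bind parameters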
- __zero_date = datetime.date(1900, 1, 1)
- def bind_processor(self, dialect):
- def process(value):
- if isinstance(value, datetime.datetime):
- value = datetime.datetime.combine(
- self.__zero_date, value.time())
- elif isinstance(value, datetime.time):
- value = datetime.datetime.combine(self.__zero_date, value)
- return value
- return process
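- # matches time result strings HH:MM:SS with optional fractional seconds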
- _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
- def result_processor(self, dialect, coltype):
- def process(value):
- if isinstance(value, datetime.datetime):
- return value.time()
- elif isinstance(value, util.string_types):
- m = self._reg.match(value)
- if not m:
- raise ValueError(
- "could not parse %r as a time value" % (value, ))
- return datetime.time(*[
- int(x or 0)
- for x in m.groups()])
- else:
- return value
- return process
- _MSTime = TIME
- class _DateTimeBase(object):
- def bind_processor(self, dialect):
- def process(value):
- if type(value) == datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- else:
- return value
- return process
- class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
- pass
- class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
- __visit_name__ = 'SMALLDATETIME'
- class DATETIME2(_DateTimeBase, sqltypes.DateTime):
- __visit_name__ = 'DATETIME2'
- def __init__(self, precision=None, **kw):
- super(DATETIME2, self).__init__(**kw)
- self.precision = precision
- # TODO: is this not an Interval ?
- class DATETIMEOFFSET(sqltypes.TypeEngine):
- __visit_name__ = 'DATETIMEOFFSET'
- def __init__(self, precision=None, **kwargs):
- self.precision = precision
- class _StringType(object):
- """Base for MSSQL string types."""
- def __init__(self, collation=None):
- super(_StringType, self).__init__(collation=collation)
- class NTEXT(sqltypes.UnicodeText):
- """MSSQL NTEXT type, for variable-length unicode text up to 2^30
- characters."""
- __visit_name__ = 'NTEXT'
- class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
- """The MSSQL VARBINARY type.
- This type extends both :class:`.types.VARBINARY` and
- :class:`.types.LargeBinary`. In "deprecate_large_types" mode,
- the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
- on SQL Server.
- .. versionadded:: 1.0.0
- .. seealso::
- :ref:`mssql_large_type_deprecation`
- """
- __visit_name__ = 'VARBINARY'
- class IMAGE(sqltypes.LargeBinary):
- __visit_name__ = 'IMAGE'
- class BIT(sqltypes.TypeEngine):
- __visit_name__ = 'BIT'
- class MONEY(sqltypes.TypeEngine):
- __visit_name__ = 'MONEY'
- class SMALLMONEY(sqltypes.TypeEngine):
- __visit_name__ = 'SMALLMONEY'
- class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
- __visit_name__ = "UNIQUEIDENTIFIER"
- class SQL_VARIANT(sqltypes.TypeEngine):
- __visit_name__ = 'SQL_VARIANT'
- # old names.
- MSDateTime = _MSDateTime
- MSDate = _MSDate
- MSReal = REAL
- MSTinyInteger = TINYINT
- MSTime = TIME
- MSSmallDateTime = SMALLDATETIME
- MSDateTime2 = DATETIME2
- MSDateTimeOffset = DATETIMEOFFSET
- MSText = TEXT
- MSNText = NTEXT
- MSString = VARCHAR
- MSNVarchar = NVARCHAR
- MSChar = CHAR
- MSNChar = NCHAR
- MSBinary = BINARY
- MSVarBinary = VARBINARY
- MSImage = IMAGE
- MSBit = BIT
- MSMoney = MONEY
- MSSmallMoney = SMALLMONEY
- MSUniqueIdentifier = UNIQUEIDENTIFIER
- MSVariant = SQL_VARIANT
- ischema_names = {
- 'int': INTEGER,
- 'bigint': BIGINT,
- 'smallint': SMALLINT,
- 'tinyint': TINYINT,
- 'varchar': VARCHAR,
- 'nvarchar': NVARCHAR,
- 'char': CHAR,
- 'nchar': NCHAR,
- 'text': TEXT,
- 'ntext': NTEXT,
- 'decimal': DECIMAL,
- 'numeric': NUMERIC,
- 'float': FLOAT,
- 'datetime': DATETIME,
- 'datetime2': DATETIME2,
- 'datetimeoffset': DATETIMEOFFSET,
- 'date': DATE,
- 'time': TIME,
- 'smalldatetime': SMALLDATETIME,
- 'binary': BINARY,
- 'varbinary': VARBINARY,
- 'bit': BIT,
- 'real': REAL,
- 'image': IMAGE,
- 'timestamp': TIMESTAMP,
- 'money': MONEY,
- 'smallmoney': SMALLMONEY,
- 'uniqueidentifier': UNIQUEIDENTIFIER,
- 'sql_variant': SQL_VARIANT,
- }
- class MSTypeCompiler(compiler.GenericTypeCompiler):
- def _extend(self, spec, type_, length=None):
- """Extend a string-type declaration with standard SQL
- COLLATE annotations.
- """
- if getattr(type_, 'collation', None):
- collation = 'COLLATE %s' % type_.collation
- else:
- collation = None
- if not length:
- length = type_.length
- if length:
- spec = spec + "(%s)" % length
- return ' '.join([c for c in (spec, collation)
- if c is not None])
- def visit_FLOAT(self, type_, **kw):
- precision = getattr(type_, 'precision', None)
- if precision is None:
- return "FLOAT"
- else:
- return "FLOAT(%(precision)s)" % {'precision': precision}
- def visit_TINYINT(self, type_, **kw):
- return "TINYINT"
- def visit_DATETIMEOFFSET(self, type_, **kw):
- if type_.precision is not None:
- return "DATETIMEOFFSET(%s)" % type_.precision
- else:
- return "DATETIMEOFFSET"
- def visit_TIME(self, type_, **kw):
- precision = getattr(type_, 'precision', None)
- if precision is not None:
- return "TIME(%s)" % precision
- else:
- return "TIME"
- def visit_DATETIME2(self, type_, **kw):
- precision = getattr(type_, 'precision', None)
- if precision is not None:
- return "DATETIME2(%s)" % precision
- else:
- return "DATETIME2"
- def visit_SMALLDATETIME(self, type_, **kw):
- return "SMALLDATETIME"
- def visit_unicode(self, type_, **kw):
- return self.visit_NVARCHAR(type_, **kw)
- def visit_text(self, type_, **kw):
- if self.dialect.deprecate_large_types:
- return self.visit_VARCHAR(type_, **kw)
- else:
- return self.visit_TEXT(type_, **kw)
- def visit_unicode_text(self, type_, **kw):
- if self.dialect.deprecate_large_types:
- return self.visit_NVARCHAR(type_, **kw)
- else:
- return self.visit_NTEXT(type_, **kw)
- def visit_NTEXT(self, type_, **kw):
- return self._extend("NTEXT", type_)
- def visit_TEXT(self, type_, **kw):
- return self._extend("TEXT", type_)
- def visit_VARCHAR(self, type_, **kw):
- return self._extend("VARCHAR", type_, length=type_.length or 'max')
- def visit_CHAR(self, type_, **kw):
- return self._extend("CHAR", type_)
- def visit_NCHAR(self, type_, **kw):
- return self._extend("NCHAR", type_)
- def visit_NVARCHAR(self, type_, **kw):
- return self._extend("NVARCHAR", type_, length=type_.length or 'max')
- def visit_date(self, type_, **kw):
- if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_, **kw)
- else:
- return self.visit_DATE(type_, **kw)
- def visit_time(self, type_, **kw):
- if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_, **kw)
- else:
- return self.visit_TIME(type_, **kw)
- def visit_large_binary(self, type_, **kw):
- if self.dialect.deprecate_large_types:
- return self.visit_VARBINARY(type_, **kw)
- else:
- return self.visit_IMAGE(type_, **kw)
- def visit_IMAGE(self, type_, **kw):
- return "IMAGE"
- def visit_VARBINARY(self, type_, **kw):
- return self._extend(
- "VARBINARY",
- type_,
- length=type_.length or 'max')
- def visit_boolean(self, type_, **kw):
- return self.visit_BIT(type_)
- def visit_BIT(self, type_, **kw):
- return "BIT"
- def visit_MONEY(self, type_, **kw):
- return "MONEY"
- def visit_SMALLMONEY(self, type_, **kw):
- return 'SMALLMONEY'
- def visit_UNIQUEIDENTIFIER(self, type_, **kw):
- return "UNIQUEIDENTIFIER"
- def visit_SQL_VARIANT(self, type_, **kw):
- return 'SQL_VARIANT'
- class MSExecutionContext(default.DefaultExecutionContext):
- _enable_identity_insert = False
- _select_lastrowid = False
- _result_proxy = None
- _lastrowid = None
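- # encode the statement for DBAPIs that do not accept unicode statements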
- def _opt_encode(self, statement):
- if not self.dialect.supports_unicode_statements:
- return self.dialect._encoder(statement)[0]
- else:
- return statement
- def pre_exec(self):
- """Activate IDENTITY_INSERT if needed."""
- if self.isinsert:
- tbl = self.compiled.statement.table
- seq_column = tbl._autoincrement_column
- insert_has_sequence = seq_column is not None
- if insert_has_sequence:
- self._enable_identity_insert = \
- seq_column.key in self.compiled_parameters[0] or \
- (
- self.compiled.statement.parameters and (
- (
- self.compiled.statement._has_multi_parameters
- and
- seq_column.key in
- self.compiled.statement.parameters[0]
- ) or (
- not
- self.compiled.statement._has_multi_parameters
- and
- seq_column.key in
- self.compiled.statement.parameters
- )
- )
- )
- else:
- self._enable_identity_insert = False
- self._select_lastrowid = not self.compiled.inline and \
- insert_has_sequence and \
- not self.compiled.returning and \
- not self._enable_identity_insert and \
- not self.executemany
- if self._enable_identity_insert:
- self.root_connection._cursor_execute(
- self.cursor,
- self._opt_encode(
- "SET IDENTITY_INSERT %s ON" %
- self.dialect.identifier_preparer.format_table(tbl)),
- (),
- self)
- def post_exec(self):
- """Disable IDENTITY_INSERT if enabled."""
- conn = self.root_connection
- if self._select_lastrowid:
- if self.dialect.use_scope_identity:
- conn._cursor_execute(
- self.cursor,
- "SELECT scope_identity() AS lastrowid", (), self)
- else:
- conn._cursor_execute(self.cursor,
- "SELECT @@identity AS lastrowid",
- (),
- self)
- # fetchall() ensures the cursor is consumed without closing it
- row = self.cursor.fetchall()[0]
- self._lastrowid = int(row[0])
- if (self.isinsert or self.isupdate or self.isdelete) and \
- self.compiled.returning:
- self._result_proxy = engine.FullyBufferedResultProxy(self)
- if self._enable_identity_insert:
- conn._cursor_execute(
- self.cursor,
- self._opt_encode(
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.format_table(
- self.compiled.statement.table)),
- (),
- self)
- def get_lastrowid(self):
- return self._lastrowid
- def handle_dbapi_exception(self, e):
- if self._enable_identity_insert:
- try:
- self.cursor.execute(
- self._opt_encode(
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.format_table(
- self.compiled.statement.table)))
- except Exception:
- pass
- def get_result_proxy(self):
- if self._result_proxy:
- return self._result_proxy
- else:
- return engine.ResultProxy(self)
- class MSSQLCompiler(compiler.SQLCompiler):
- returning_precedes_values = True
- extract_map = util.update_copy(
- compiler.SQLCompiler.extract_map,
- {
- 'doy': 'dayofyear',
- 'dow': 'weekday',
- 'milliseconds': 'millisecond',
- 'microseconds': 'microsecond'
- })
- def __init__(self, *args, **kwargs):
- self.tablealiases = {}
- super(MSSQLCompiler, self).__init__(*args, **kwargs)
- def _with_legacy_schema_aliasing(fn):
- def decorate(self, *arg, **kw):
- if self.dialect.legacy_schema_aliasing:
- return fn(self, *arg, **kw)
- else:
- super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
- return super_(*arg, **kw)
- return decorate
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
- def visit_current_date_func(self, fn, **kw):
- return "GETDATE()"
- def visit_length_func(self, fn, **kw):
- return "LEN%s" % self.function_argspec(fn, **kw)
- def visit_char_length_func(self, fn, **kw):
- return "LEN%s" % self.function_argspec(fn, **kw)
- def visit_concat_op_binary(self, binary, operator, **kw):
- return "%s + %s" % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw))
- def visit_true(self, expr, **kw):
- return '1'
- def visit_false(self, expr, **kw):
- return '0'
- def visit_match_op_binary(self, binary, operator, **kw):
- return "CONTAINS (%s, %s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
- def get_select_precolumns(self, select, **kw):
- """ MS-SQL puts TOP, it's version of LIMIT here """
- s = ""
- if select._distinct:
- s += "DISTINCT "
- if select._simple_int_limit and not select._offset:
- # ODBC drivers and possibly others
- # don't support bind params in the SELECT clause on SQL Server.
- # so have to use literal here.
- s += "TOP %d " % select._limit
- if s:
- return s
- else:
- return compiler.SQLCompiler.get_select_precolumns(
- self, select, **kw)
- def get_from_hint_text(self, table, text):
- return text
- def get_crud_hint_text(self, table, text):
- return text
- def limit_clause(self, select, **kw):
- # Limit in mssql is after the select keyword
- return ""
- def visit_select(self, select, **kwargs):
- """Look for ``LIMIT`` and OFFSET in a select statement, and if
- so tries to wrap it in a subquery with ``row_number()`` criterion.
- """
- if (
- (
- not select._simple_int_limit and
- select._limit_clause is not None
- ) or (
- select._offset_clause is not None and
- not select._simple_int_offset or select._offset
- )
- ) and not getattr(select, '_mssql_visit', None):
- # to use ROW_NUMBER(), an ORDER BY is required.
- if not select._order_by_clause.clauses:
- raise exc.CompileError('MSSQL requires an order_by when '
- 'using an OFFSET or a non-simple '
- 'LIMIT clause')
- _order_by_clauses = [
- sql_util.unwrap_label_reference(elem)
- for elem in select._order_by_clause.clauses
- ]
- limit_clause = select._limit_clause
- offset_clause = select._offset_clause
- kwargs['select_wraps_for'] = select
- select = select._generate()
- select._mssql_visit = True
- select = select.column(
- sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
- .label("mssql_rn")).order_by(None).alias()
- mssql_rn = sql.column('mssql_rn')
- limitselect = sql.select([c for c in select.c if
- c.key != 'mssql_rn'])
- if offset_clause is not None:
- limitselect.append_whereclause(mssql_rn > offset_clause)
- if limit_clause is not None:
- limitselect.append_whereclause(
- mssql_rn <= (limit_clause + offset_clause))
- else:
- limitselect.append_whereclause(
- mssql_rn <= (limit_clause))
- return self.process(limitselect, **kwargs)
- else:
- return compiler.SQLCompiler.visit_select(self, select, **kwargs)
- @_with_legacy_schema_aliasing
- def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
- if mssql_aliased is table or iscrud:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
- # alias schema-qualified tables
- alias = self._schema_aliased_table(table)
- if alias is not None:
- return self.process(alias, mssql_aliased=table, **kwargs)
- else:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
- @_with_legacy_schema_aliasing
- def visit_alias(self, alias, **kw):
- # translate for schema-qualified table aliases
- kw['mssql_aliased'] = alias.original
- return super(MSSQLCompiler, self).visit_alias(alias, **kw)
- @_with_legacy_schema_aliasing
- def visit_column(self, column, add_to_result_map=None, **kw):
- if column.table is not None and \
- (not self.isupdate and not self.isdelete) or \
- self.is_subquery():
- # translate for schema-qualified table aliases
- t = self._schema_aliased_table(column.table)
- if t is not None:
- converted = expression._corresponding_column_or_error(
- t, column)
- if add_to_result_map is not None:
- add_to_result_map(
- column.name,
- column.name,
- (column, column.name, column.key),
- column.type
- )
- return super(MSSQLCompiler, self).\
- visit_column(converted, **kw)
- return super(MSSQLCompiler, self).visit_column(
- column, add_to_result_map=add_to_result_map, **kw)
- def _schema_aliased_table(self, table):
- if getattr(table, 'schema', None) is not None:
- if table not in self.tablealiases:
- self.tablealiases[table] = table.alias()
- return self.tablealiases[table]
- else:
- return None
- def visit_extract(self, extract, **kw):
- field = self.extract_map.get(extract.field, extract.field)
- return 'DATEPART(%s, %s)' % \
- (field, self.process(extract.expr, **kw))
- def visit_savepoint(self, savepoint_stmt):
- return "SAVE TRANSACTION %s" % \
- self.preparer.format_savepoint(savepoint_stmt)
- def visit_rollback_to_savepoint(self, savepoint_stmt):
- return ("ROLLBACK TRANSACTION %s"
- % self.preparer.format_savepoint(savepoint_stmt))
- def visit_binary(self, binary, **kwargs):
- """Move bind parameters to the right-hand side of an operator, where
- possible.
- """
- if (
- isinstance(binary.left, expression.BindParameter)
- and binary.operator == operator.eq
- and not isinstance(binary.right, expression.BindParameter)
- ):
- return self.process(
- expression.BinaryExpression(binary.right,
- binary.left,
- binary.operator),
- **kwargs)
- return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
- def returning_clause(self, stmt, returning_cols):
- if self.isinsert or self.isupdate:
- target = stmt.table.alias("inserted")
- else:
- target = stmt.table.alias("deleted")
- adapter = sql_util.ClauseAdapter(target)
- columns = [
- self._label_select_column(None, adapter.traverse(c),
- True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
- return 'OUTPUT ' + ', '.join(columns)
- def get_cte_preamble(self, recursive):
- # SQL Server finds it too inconvenient to accept
- # an entirely optional, SQL standard specified,
- # "RECURSIVE" word with their "WITH",
- # so here we go
- return "WITH"
- def label_select_column(self, select, column, asfrom):
- if isinstance(column, expression.Function):
- return column.label(None)
- else:
- return super(MSSQLCompiler, self).\
- label_select_column(select, column, asfrom)
- def for_update_clause(self, select):
- # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
- # SQLAlchemy doesn't use
- return ''
- def order_by_clause(self, select, **kw):
- order_by = self.process(select._order_by_clause, **kw)
- # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
- if order_by and (not self.is_subquery() or select._limit):
- return " ORDER BY " + order_by
- else:
- return ""
- def update_from_clause(self, update_stmt,
- from_table, extra_froms,
- from_hints,
- **kw):
- """Render the UPDATE..FROM clause specific to MSSQL.
- In MSSQL, if the UPDATE statement involves an alias of the table to
- be updated, then the table itself must be added to the FROM list as
- well. Otherwise, it is optional. Here, we add it regardless.
- """
- return "FROM " + ', '.join(
- t._compiler_dispatch(self, asfrom=True,
- fromhints=from_hints, **kw)
- for t in [from_table] + extra_froms)
- class MSSQLStrictCompiler(MSSQLCompiler):
- """A subclass of MSSQLCompiler which disables the usage of bind
- parameters where not allowed natively by MS-SQL.
- A dialect may use this compiler on a platform where native
- binds are used.
- """
- ansi_bind_rules = True
- def visit_in_op_binary(self, binary, operator, **kw):
- kw['literal_binds'] = True
- return "%s IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
- def visit_notin_op_binary(self, binary, operator, **kw):
- kw['literal_binds'] = True
- return "%s NOT IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
- def render_literal_value(self, value, type_):
- """
- For date and datetime values, convert to a string
- format acceptable to MSSQL. That seems to be the
- so-called ODBC canonical date format which looks
- like this:
- yyyy-mm-dd hh:mi:ss.mmm(24h)
- For other data types, call the base class implementation.
- """
- # datetime and date are both subclasses of datetime.date
- if issubclass(type(value), datetime.date):
- # SQL Server wants single quotes around the date string.
- return "'" + str(value) + "'"
- else:
- return super(MSSQLStrictCompiler, self).\
- render_literal_value(value, type_)
- class MSDDLCompiler(compiler.DDLCompiler):
- def get_column_specification(self, column, **kwargs):
- colspec = (
- self.preparer.format_column(column) + " "
- + self.dialect.type_compiler.process(
- column.type, type_expression=column)
- )
- if column.nullable is not None:
- if not column.nullable or column.primary_key or \
- isinstance(column.default, sa_schema.Sequence):
- colspec += " NOT NULL"
- else:
- colspec += " NULL"
- if column.table is None:
- raise exc.CompileError(
- "mssql requires Table-bound columns "
- "in order to generate DDL")
- # install an IDENTITY Sequence if we have either a sequence or an
- # implicit IDENTITY column
- if isinstance(column.default, sa_schema.Sequence):
- if column.default.start == 0:
- start = 0
- else:
- start = column.default.start or 1
- colspec += " IDENTITY(%s,%s)" % (start,
- column.default.increment or 1)
- elif column is column.table._autoincrement_column:
- colspec += " IDENTITY(1,1)"
- else:
- default = self.get_column_default_string(column)
- if default is not None:
- colspec += " DEFAULT " + default
- return colspec
- def visit_create_index(self, create, include_schema=False):
- index = create.element
- self._verify_index_table(index)
- preparer = self.preparer
- text = "CREATE "
- if index.unique:
- text += "UNIQUE "
- # handle clustering option
- clustered = index.dialect_options['mssql']['clustered']
- if clustered is not None:
- if clustered:
- text += "CLUSTERED "
- else:
- text += "NONCLUSTERED "
- text += "INDEX %s ON %s (%s)" \
- % (
- self._prepared_index_name(index,
- include_schema=include_schema),
- preparer.format_table(index.table),
- ', '.join(
- self.sql_compiler.process(expr,
- include_table=False,
- literal_binds=True) for
- expr in index.expressions)
- )
- # handle other included columns
- if index.dialect_options['mssql']['include']:
- inclusions = [index.table.c[col]
- if isinstance(col, util.string_types) else col
- for col in
- index.dialect_options['mssql']['include']
- ]
- text += " INCLUDE (%s)" \
- % ', '.join([preparer.quote(c.name)
- for c in inclusions])
- return text
- def visit_drop_index(self, drop):
- return "\nDROP INDEX %s ON %s" % (
- self._prepared_index_name(drop.element, include_schema=False),
- self.preparer.format_table(drop.element.table)
- )
- def visit_primary_key_constraint(self, constraint):
- if len(constraint) == 0:
- return ''
- text = ""
- if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
- text += "PRIMARY KEY "
- clustered = constraint.dialect_options['mssql']['clustered']
- if clustered is not None:
- if clustered:
- text += "CLUSTERED "
- else:
- text += "NONCLUSTERED "
- text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
- text += self.define_constraint_deferrability(constraint)
- return text
- def visit_unique_constraint(self, constraint):
- if len(constraint) == 0:
- return ''
- text = ""
- if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
- text += "UNIQUE "
- clustered = constraint.dialect_options['mssql']['clustered']
- if clustered is not None:
- if clustered:
- text += "CLUSTERED "
- else:
- text += "NONCLUSTERED "
- text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
- text += self.define_constraint_deferrability(constraint)
- return text
- class MSIdentifierPreparer(compiler.IdentifierPreparer):
- reserved_words = RESERVED_WORDS
- def __init__(self, dialect):
- super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
- final_quote=']')
- def _escape_identifier(self, value):
- return value
- def quote_schema(self, schema, force=None):
- """Prepare a quoted table and schema name."""
- result = '.'.join([self.quote(x, force) for x in schema.split('.')])
- return result
- def _db_plus_owner_listing(fn):
- def wrap(dialect, connection, schema=None, **kw):
- dbname, owner = _owner_plus_db(dialect, schema)
- return _switch_db(dbname, connection, fn, dialect, connection,
- dbname, owner, schema, **kw)
- return update_wrapper(wrap, fn)
- def _db_plus_owner(fn):
- def wrap(dialect, connection, tablename, schema=None, **kw):
- dbname, owner = _owner_plus_db(dialect, schema)
- return _switch_db(dbname, connection, fn, dialect, connection,
- tablename, dbname, owner, schema, **kw)
- return update_wrapper(wrap, fn)
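- # execute fn with the connection temporarily switched to the given
- # database, restoring the original database afterwards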
- def _switch_db(dbname, connection, fn, *arg, **kw):
- if dbname:
- current_db = connection.scalar("select db_name()")
- connection.execute("use %s" % dbname)
- try:
- return fn(*arg, **kw)
- finally:
- if dbname:
- connection.execute("use %s" % current_db)
- def _owner_plus_db(dialect, schema):
- if not schema:
- return None, dialect.default_schema_name
- elif "." in schema:
- return schema.split(".", 1)
- else:
- return None, schema
- class MSDialect(default.DefaultDialect):
- name = 'mssql'
- supports_default_values = True
- supports_empty_insert = False
- execution_ctx_cls = MSExecutionContext
- use_scope_identity = True
- max_identifier_length = 128
- schema_name = "dbo"
- colspecs = {
- sqltypes.DateTime: _MSDateTime,
- sqltypes.Date: _MSDate,
- sqltypes.Time: TIME,
- }
- engine_config_types = default.DefaultDialect.engine_config_types.union([
- ('legacy_schema_aliasing', util.asbool),
- ])
- ischema_names = ischema_names
- supports_native_boolean = False
- supports_unicode_binds = True
- postfetch_lastrowid = True
- server_version_info = ()
- statement_compiler = MSSQLCompiler
- ddl_compiler = MSDDLCompiler
- type_compiler = MSTypeCompiler
- preparer = MSIdentifierPreparer
- construct_arguments = [
- (sa_schema.PrimaryKeyConstraint, {
- "clustered": None
- }),
- (sa_schema.UniqueConstraint, {
- "clustered": None
- }),
- (sa_schema.Index, {
- "clustered": None,
- "include": None
- })
- ]
- def __init__(self,
- query_timeout=None,
- use_scope_identity=True,
- max_identifier_length=None,
- schema_name="dbo",
- isolation_level=None,
- deprecate_large_types=None,
- legacy_schema_aliasing=False, **opts):
- self.query_timeout = int(query_timeout or 0)
- self.schema_name = schema_name
- self.use_scope_identity = use_scope_identity
- self.max_identifier_length = int(max_identifier_length or 0) or \
- self.max_identifier_length
- self.deprecate_large_types = deprecate_large_types
- self.legacy_schema_aliasing = legacy_schema_aliasing
- super(MSDialect, self).__init__(**opts)
- self.isolation_level = isolation_level
- def do_savepoint(self, connection, name):
- # give the DBAPI a push
- connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
- super(MSDialect, self).do_savepoint(connection, name)
- def do_release_savepoint(self, connection, name):
- # SQL Server does not support RELEASE SAVEPOINT
- pass
- _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
- 'READ COMMITTED', 'REPEATABLE READ',
- 'SNAPSHOT'])
- def set_isolation_level(self, connection, level):
- level = level.replace('_', ' ')
- if level not in self._isolation_lookup:
- raise exc.ArgumentError(
- "Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
- )
- cursor = connection.cursor()
- cursor.execute(
- "SET TRANSACTION ISOLATION LEVEL %s" % level)
- cursor.close()
- def get_isolation_level(self, connection):
- if self.server_version_info < MS_2005_VERSION:
- raise NotImplementedError(
- "Can't fetch isolation level prior to SQL Server 2005")
- cursor = connection.cursor()
- cursor.execute("""
- SELECT CASE transaction_isolation_level
- WHEN 0 THEN NULL
- WHEN 1 THEN 'READ UNCOMMITTED'
- WHEN 2 THEN 'READ COMMITTED'
- WHEN 3 THEN 'REPEATABLE READ'
- WHEN 4 THEN 'SERIALIZABLE'
- WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
- FROM sys.dm_exec_sessions
- where session_id = @@SPID
- """)
- val = cursor.fetchone()[0]
- cursor.close()
- return val.upper()
- def initialize(self, connection):
- super(MSDialect, self).initialize(connection)
- self._setup_version_attributes()
- def on_connect(self):
- if self.isolation_level is not None:
- def connect(conn):
- self.set_isolation_level(conn, self.isolation_level)
- return connect
- else:
- return None

    def _setup_version_attributes(self):
        if self.server_version_info[0] not in list(range(8, 17)):
            util.warn(
                "Unrecognized server version info '%s'. Some SQL Server "
                "features may not function properly." %
                ".".join(str(x) for x in self.server_version_info))
        if self.server_version_info >= MS_2005_VERSION and \
                'implicit_returning' not in self.__dict__:
            self.implicit_returning = True
        if self.server_version_info >= MS_2008_VERSION:
            self.supports_multivalues_insert = True
        if self.deprecate_large_types is None:
            self.deprecate_large_types = \
                self.server_version_info >= MS_2012_VERSION
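
    # Sketch of what the 2008+ flag enables (illustrative): with
    # ``supports_multivalues_insert`` set, a single INSERT statement may
    # carry multiple VALUES tuples::
    #
    #     conn.execute(t.insert().values([{'x': 1}, {'x': 2}, {'x': 3}]))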

    def _get_default_schema_name(self, connection):
        if self.server_version_info < MS_2005_VERSION:
            return self.schema_name
        else:
            query = sql.text("SELECT schema_name()")
            default_schema_name = connection.scalar(query)
            if default_schema_name is not None:
                return util.text_type(default_schema_name)
            else:
                return self.schema_name

    @_db_plus_owner
    def has_table(self, connection, tablename, dbname, owner, schema):
        columns = ischema.columns
        whereclause = columns.c.table_name == tablename

        if owner:
            whereclause = sql.and_(whereclause,
                                   columns.c.table_schema == owner)
        s = sql.select([columns], whereclause)
        c = connection.execute(s)
        return c.first() is not None

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        s = sql.select([ischema.schemata.c.schema_name],
                       order_by=[ischema.schemata.c.schema_name]
                       )
        schema_names = [r[0] for r in connection.execute(s)]
        return schema_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_table_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'BASE TABLE'
                       ),
                       order_by=[tables.c.table_name]
                       )
        table_names = [r[0] for r in connection.execute(s)]
        return table_names

    @reflection.cache
    @_db_plus_owner_listing
    def get_view_names(self, connection, dbname, owner, schema, **kw):
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'VIEW'
                       ),
                       order_by=[tables.c.table_name]
                       )
        view_names = [r[0] for r in connection.execute(s)]
        return view_names

    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }

        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            ),
        )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])

        return list(indexes.values())
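
    # Illustrative result shape (assuming the inspector usage sketched
    # earlier): each reflected index is a dict keyed on 'name', 'unique'
    # and 'column_names'::
    #
    #     insp.get_indexes('some_table', schema='dbo')
    #     # e.g. [{'name': 'ix_t_x', 'unique': False,
    #     #        'column_names': ['x']}]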

    @reflection.cache
    @_db_plus_owner
    def get_view_definition(self, connection, viewname,
                            dbname, owner, schema, **kw):
        rp = connection.execute(
            sql.text(
                "select definition from sys.sql_modules as mod, "
                "sys.views as views, "
                "sys.schemas as sch"
                " where "
                "mod.object_id=views.object_id and "
                "views.schema_id=sch.schema_id and "
                "views.name=:viewname and sch.name=:schname",
                bindparams=[
                    sql.bindparam('viewname', viewname,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ]
            )
        )

        if rp:
            view_def = rp.scalar()
            return view_def

    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])

        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
                numericprec, numericscale, default, collation) = (
                row[columns.c.column_name],
                row[columns.c.data_type],
                row[columns.c.is_nullable] == 'YES',
                row[columns.c.character_maximum_length],
                row[columns.c.numeric_precision],
                row[columns.c.numeric_scale],
                row[columns.c.column_default],
                row[columns.c.collation_name]
            )
            coltype = self.ischema_names.get(type, None)

            kwargs = {}
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                if charlen == -1:
                    charlen = None
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation

            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec

                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)

        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()

        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )

            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols
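
    # Illustrative result shape (assuming the inspector usage sketched
    # earlier): identity columns come back with 'autoincrement' set and a
    # 'sequence' entry carrying the reflected seed/increment::
    #
    #     insp.get_columns('some_table', schema='dbo')
    #     # e.g. [{'name': 'id', 'type': INTEGER(), 'nullable': False,
    #     #        'default': None, 'autoincrement': True,
    #     #        'sequence': {'name': 'id_identity',
    #     #                     'start': 1, 'increment': 1}}, ...]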

    @reflection.cache
    @_db_plus_owner
    def get_pk_constraint(self, connection, tablename,
                          dbname, owner, schema, **kw):
        pkeys = []
        TC = ischema.constraints
        C = ischema.key_constraints.alias('C')

        # Primary key constraints
        s = sql.select([C.c.column_name,
                        TC.c.constraint_type,
                        C.c.constraint_name],
                       sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                TC.c.table_schema == C.c.table_schema,
                                C.c.table_name == tablename,
                                C.c.table_schema == owner)
                       )
        c = connection.execute(s)
        constraint_name = None
        for row in c:
            if 'PRIMARY' in row[TC.c.constraint_type.name]:
                pkeys.append(row[0])
                if constraint_name is None:
                    constraint_name = row[C.c.constraint_name.name]
        return {'constrained_columns': pkeys, 'name': constraint_name}

    @reflection.cache
    @_db_plus_owner
    def get_foreign_keys(self, connection, tablename,
                         dbname, owner, schema, **kw):
        RR = ischema.ref_constraints
        C = ischema.key_constraints.alias('C')
        R = ischema.key_constraints.alias('R')

        # Foreign key constraints
        s = sql.select([C.c.column_name,
                        R.c.table_schema, R.c.table_name, R.c.column_name,
                        RR.c.constraint_name, RR.c.match_option,
                        RR.c.update_rule,
                        RR.c.delete_rule],
                       sql.and_(C.c.table_name == tablename,
                                C.c.table_schema == owner,
                                C.c.constraint_name == RR.c.constraint_name,
                                R.c.constraint_name ==
                                RR.c.unique_constraint_name,
                                C.c.ordinal_position == R.c.ordinal_position
                                ),
                       order_by=[RR.c.constraint_name, R.c.ordinal_position]
                       )

        # group rows by constraint ID, to handle multi-column FKs

        def fkey_rec():
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        for r in connection.execute(s).fetchall():
            scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r

            rec = fkeys[rfknm]
            rec['name'] = rfknm
            if not rec['referred_table']:
                rec['referred_table'] = rtbl
                if schema is not None or owner != rschema:
                    if dbname:
                        rschema = dbname + "." + rschema
                    rec['referred_schema'] = rschema

            local_cols, remote_cols = \
                rec['constrained_columns'],\
                rec['referred_columns']

            local_cols.append(scol)
            remote_cols.append(rcol)

        return list(fkeys.values())
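
    # Illustrative usage (assuming the inspector from the earlier sketches):
    # each returned record groups all columns of one, possibly composite,
    # foreign key::
    #
    #     insp.get_pk_constraint('some_table', schema='dbo')
    #     # e.g. {'constrained_columns': ['id'], 'name': 'pk_some_table'}
    #
    #     insp.get_foreign_keys('some_table', schema='dbo')
    #     # e.g. [{'name': 'fk_some_table_other',
    #     #        'constrained_columns': ['other_id'],
    #     #        'referred_schema': None, 'referred_table': 'other',
    #     #        'referred_columns': ['id']}]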