PostGIS  2.3.7dev-r@@SVN_REVISION@@
int ShpDumperOpenTable ( SHPDUMPERSTATE * state )

Definition at line 1334 of file pgsql2shp-core.c.

References _, shp_dumper_state::big_endian, shp_dumper_config::binary, colmap_dbf_by_pg(), colmap_read(), shp_dumper_state::column_map, shp_dumper_config::column_map_filename, shp_dumper_state::config, shp_dumper_state::conn, shp_dumper_state::currescount, shp_dumper_state::curresrow, shp_dumper_state::currow, shp_dumper_state::dbf, DBFAddField(), DBFCreateEx(), shp_dumper_state::dbffieldnames, shp_dumper_state::dbffieldtypes, encoding2codepage(), shp_dumper_state::fetch_query, shp_dumper_state::fetchres, shp_dumper_config::fetchsize, shp_dumper_state::fieldcount, free(), shp_dumper_config::geo_col_name, shp_dumper_state::geo_col_name, shp_dumper_state::geog_oid, shp_dumper_state::geom_oid, getMaxFieldSize(), getTableInfo(), shp_dumper_config::includegid, shp_dumper_config::keep_fieldname_case, LWDEBUGF, shp_dumper_state::main_scan_query, malloc(), MAX_DBF_FIELD_SIZE, shp_dumper_state::message, shp_dumper_state::outshptype, shp_dumper_state::outtype, shp_dumper_state::pgfieldlens, shp_dumper_state::pgfieldnames, shp_dumper_state::pgfieldtypmods, shp_dumper_state::pgis_major_version, quote_identifier(), window::res, shp_dumper_state::rowcount, shp_dumper_config::schema, shp_dumper_state::schema, shp_dumper_state::shp, shp_dumper_config::shp_file, shp_dumper_state::shp_file, SHPCreate(), SHPDUMPERERR, SHPDUMPERMSGLEN, SHPDUMPEROK, SHPDUMPERWARN, shp_dumper_config::table, shp_dumper_state::table, shp_dumper_config::unescapedattrs, and shp_dumper_config::usrquery.

Referenced by main(), and pgui_action_export().
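
The function reports through its return value and state->message, so a minimal, hedged sketch of how a caller such as main() or pgui_action_export() might drive it looks as follows. Only ShpDumperOpenTable(), the SHPDUMPERSTATE type, the SHPDUMPEROK/SHPDUMPERWARN/SHPDUMPERERR macros and the message buffer come from this page; the wrapper function name and the header path are assumptions made for illustration.

#include <stdio.h>
#include "pgsql2shp-core.h"   /* assumed header; provides SHPDUMPERSTATE and the SHPDUMPER* macros */

/* Hypothetical helper (not part of the PostGIS sources): open the
 * table and report the outcome the way a caller would need to. */
static int open_table_checked(SHPDUMPERSTATE *state)
{
    int ret = ShpDumperOpenTable(state);

    if (ret == SHPDUMPERERR)
    {
        /* Hard failure: the cause has been written into state->message. */
        fprintf(stderr, "%s\n", state->message);
        return 0;
    }

    if (ret == SHPDUMPERWARN)
    {
        /* Non-fatal: renamed fields, oversized values or a missing
         * geometry column have been concatenated into state->message. */
        fprintf(stderr, "%s\n", state->message);
    }

    /* SHPDUMPEROK, or warnings only: the .dbf (and possibly .shp) files
     * exist, the cursor is declared and row fetching can begin. */
    return 1;
}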

1335 {
1336  PGresult *res;
1337 
1338  char buf[256];
1339  char *query;
1340  int gidfound = 0, i, j, ret, status;
1341 
1342 
1343  /* Open the column map if one was specified */
1344  if (state->config->column_map_filename)
1345  {
1346  ret = colmap_read(state->config->column_map_filename,
1347  &state->column_map, state->message, SHPDUMPERMSGLEN);
1348  if (!ret) return SHPDUMPERERR;
1349  }
1350 
1351  /* If a user-defined query has been specified, create and point the state to our new table */
1352  if (state->config->usrquery)
1353  {
1354  state->table = malloc(20 + 20); /* string + max long precision */
1355  sprintf(state->table, "__pgsql2shp%lu_tmp_table", (long)getpid());
1356 
1357  query = malloc(32 + strlen(state->table) + strlen(state->config->usrquery));
1358 
1359  sprintf(query, "CREATE TEMP TABLE \"%s\" AS %s", state->table, state->config->usrquery);
1360  res = PQexec(state->conn, query);
1361  free(query);
1362 
1363  /* Execute the code to create the table */
1364  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1365  {
1366  snprintf(state->message, SHPDUMPERMSGLEN, _("Error executing user query: %s"), PQresultErrorMessage(res));
1367  PQclear(res);
1368  return SHPDUMPERERR;
1369  }
1370  }
1371  else
1372  {
1373  /* Simply point the state to copies of the supplied schema and table */
1374  state->table = strdup(state->config->table);
1375  if (state->config->schema)
1376  state->schema = strdup(state->config->schema);
1377  }
1378 
1379 
1380  /* Get the list of columns and their types for the selected table */
1381  if (state->schema)
1382  {
1383  query = malloc(250 + strlen(state->schema) + strlen(state->table));
1384 
1385  sprintf(query, "SELECT a.attname, a.atttypid, "
1386  "a.atttypmod, a.attlen FROM "
1387  "pg_attribute a, pg_class c, pg_namespace n WHERE "
1388  "n.nspname = '%s' AND a.attrelid = c.oid AND "
1389  "n.oid = c.relnamespace AND "
1390  "a.atttypid != 0 AND "
1391  "a.attnum > 0 AND c.relname = '%s'", state->schema, state->table);
1392  }
1393  else
1394  {
1395  query = malloc(250 + strlen(state->table));
1396 
1397  sprintf(query, "SELECT a.attname, a.atttypid, "
1398  "a.atttypmod, a.attlen FROM "
1399  "pg_attribute a, pg_class c WHERE "
1400  "a.attrelid = c.oid and a.attnum > 0 AND "
1401  "a.atttypid != 0 AND "
1402  "c.relname = '%s' AND "
1403  "pg_catalog.pg_table_is_visible(c.oid)", state->table);
1404  }
1405 
1406  LWDEBUGF(3, "query is: %s\n", query);
1407 
1408  res = PQexec(state->conn, query);
1409  free(query);
1410 
1411  if (PQresultStatus(res) != PGRES_TUPLES_OK)
1412  {
1413  snprintf(state->message, SHPDUMPERMSGLEN, _("Error querying for attributes: %s"), PQresultErrorMessage(res));
1414  PQclear(res);
1415  return SHPDUMPERERR;
1416  }
1417 
1418  if (!PQntuples(res))
1419  {
1420  snprintf(state->message, SHPDUMPERMSGLEN, _("Table %s does not exist"), state->table);
1421  PQclear(res);
1422  return SHPDUMPERERR;
1423  }
1424 
1425  /* If a shapefile name was specified, use it. Otherwise simply use the table name. */
1426  if (state->config->shp_file != NULL)
1427  state->shp_file = state->config->shp_file;
1428  else
1429  state->shp_file = state->table;
1430 
1431  /* Create the dbf file: */
1432  /* If there's a user-specified encoding hanging around, try and use that. */
1433  /* Otherwise, just use UTF-8 encoding, since that's usually our client encoding. */
1434  if ( getenv("PGCLIENTENCODING") )
1435  {
1436  char *codepage = encoding2codepage(getenv("PGCLIENTENCODING"));
1437  state->dbf = DBFCreateEx(state->shp_file, codepage);
1438  }
1439  else
1440  {
1441  state->dbf = DBFCreateEx(state->shp_file, "UTF-8");
1442  }
1443 
1444  if (!state->dbf)
1445  {
1446  snprintf(state->message, SHPDUMPERMSGLEN, _("Could not create dbf file %s"), state->shp_file);
1447  return SHPDUMPERERR;
1448  }
1449 
1450  /*
1451  * Scan the result set, decide which fields will be returned by the
1452  * main scan query, fill the field-type arrays and add the .dbf fields.
1453  */
1454  state->dbffieldnames = malloc(sizeof(char *) * PQntuples(res));
1455  state->dbffieldtypes = malloc(sizeof(int) * PQntuples(res));
1456  state->pgfieldnames = malloc(sizeof(char *) * PQntuples(res));
1457  state->pgfieldlens = malloc(sizeof(int) * PQntuples(res));
1458  state->pgfieldtypmods = malloc(sizeof(int) * PQntuples(res));
1459  state->fieldcount = 0;
1460  int tmpint = 1;
1461 
1462  for (i = 0; i < PQntuples(res); i++)
1463  {
1464  char *ptr;
1465 
1466  int pgfieldtype, pgtypmod, pgfieldlen;
1467  char *pgfieldname;
1468 
1469  int dbffieldtype, dbffieldsize, dbffielddecs;
1470  char *dbffieldname;
1471 
1472  pgfieldname = PQgetvalue(res, i, 0);
1473  pgfieldtype = atoi(PQgetvalue(res, i, 1));
1474  pgtypmod = atoi(PQgetvalue(res, i, 2));
1475  pgfieldlen = atoi(PQgetvalue(res, i, 3));
1476  dbffieldtype = -1;
1477  dbffieldsize = 0;
1478  dbffielddecs = 0;
1479 
1480  /*
1481  * This is a geometry/geography column
1482  */
1483  if (pgfieldtype == state->geom_oid || pgfieldtype == state->geog_oid)
1484  {
1485  /* If no geometry/geography column has been found yet... */
1486  if (!state->geo_col_name)
1487  {
1488  /* If either no geo* column name was provided (in which case this is
1489  the first match) or we match the provided column name, we have
1490  found our geo* column */
1491  if (!state->config->geo_col_name || !strcmp(state->config->geo_col_name, pgfieldname))
1492  {
1493  dbffieldtype = 9;
1494 
1495  state->geo_col_name = strdup(pgfieldname);
1496  }
1497  }
1498  }
1499 
1500  /*
1501  * Everything else (non geometries) will be
1502  * a DBF attribute.
1503  */
1504 
1505  /* Skip gid (if not asked to do otherwise) */
1506  if (!strcmp(pgfieldname, "gid") )
1507  {
1508  gidfound = 1;
1509 
1510  if (!state->config->includegid)
1511  continue;
1512  }
1513 
1514  /* Unescape any reserved column names */
1515  ptr = pgfieldname;
1516  if (!state->config->unescapedattrs)
1517  {
1518  if (*ptr == '_')
1519  ptr += 2;
1520  }
1521 
1522  /*
1523  * This needs special handling since both xmin and _xmin
1524  * become __xmin when escaped
1525  */
1526 
1527  /* Limit dbf field name to 10 characters */
1528  dbffieldname = malloc(11);
1529  strncpy(dbffieldname, ptr, 10);
1530  dbffieldname[10] = '\0';
1531 
1532  /* If a column map file has been passed in,
1533  * use this to create the dbf field name from
1534  * the PostgreSQL column name */
1535  {
1536  const char *mapped = colmap_dbf_by_pg(&state->column_map, dbffieldname);
1537  if (mapped)
1538  {
1539  strncpy(dbffieldname, mapped, 10);
1540  dbffieldname[10] = '\0';
1541  }
1542  }
1543 
1544  /*
1545  * make sure the fields all have unique names,
1546  */
1547  tmpint = 1;
1548  for (j = 0; j < state->fieldcount; j++)
1549  {
1550  if (!strncasecmp(dbffieldname, state->dbffieldnames[j], 10))
1551  {
1552  sprintf(dbffieldname, "%.7s_%.2d", ptr, tmpint++);
1553  continue;
1554  }
1555  }
1556 
1557  /* make UPPERCASE if keep_fieldname_case = 0 */
1558  if (!state->config->keep_fieldname_case)
1559  for (j = 0; j < strlen(dbffieldname); j++)
1560  dbffieldname[j] = toupper(dbffieldname[j]);
1561 
1562  /* Issue warning if column has been renamed */
1563  if (strcasecmp(dbffieldname, pgfieldname))
1564  {
1565  if ( snprintf(buf, 256, _("Warning, field %s renamed to %s\n"),
1566  pgfieldname, dbffieldname) >= 256 )
1567  {
1568  buf[255] = '\0';
1569  }
1570  /* Note: we concatenate all warnings from the main loop as this is useful information */
1571  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message) - 1);
1572 
1573  ret = SHPDUMPERWARN;
1574  }
1575 
1576 
1577  /*
1578  * Find appropriate type of dbf attributes
1579  */
1580 
1581  /* int2 type */
1582  if (pgfieldtype == 21)
1583  {
1584  /*
1585  * Longest text representation for
1586  * an int2 type (16bit) is 6 bytes
1587  * (-32768)
1588  */
1589  dbffieldtype = FTInteger;
1590  dbffieldsize = 6;
1591  dbffielddecs = 0;
1592  }
1593 
1594  /* int4 type */
1595  else if (pgfieldtype == 23)
1596  {
1597  /*
1598  * Longest text representation for
1599  * an int4 type (32bit) is 11 bytes
1600  * (-2147483648)
1601  */
1602  dbffieldtype = FTInteger;
1603  dbffieldsize = 11;
1604  dbffielddecs = 0;
1605  }
1606 
1607  /* int8 type */
1608  else if (pgfieldtype == 20)
1609  {
1610  /*
1611  * Longest text representation for
1612  * an int8 type (64bit) is 20 bytes
1613  * (-9223372036854775808)
1614  */
1615  dbffieldtype = FTInteger;
1616  dbffieldsize = 19;
1617  dbffielddecs = 0;
1618  }
1619 
1620  /*
1621  * double or numeric types:
1622  * 700: float4
1623  * 701: float8
1624  * 1700: numeric
1625  *
1626  *
1627  * TODO: stricter handling of sizes
1628  */
1629  else if (pgfieldtype == 700 || pgfieldtype == 701 || pgfieldtype == 1700)
1630  {
1631  dbffieldtype = FTDouble;
1632  dbffieldsize = 32;
1633  dbffielddecs = 10;
1634  }
1635 
1636  /*
1637  * Boolean field, we use FTLogical
1638  */
1639  else if (pgfieldtype == 16)
1640  {
1641  dbffieldtype = FTLogical;
1642  dbffieldsize = 1;
1643  dbffielddecs = 0;
1644  }
1645 
1646  /*
1647  * Date field
1648  */
1649  else if (pgfieldtype == 1082)
1650  {
1651  dbffieldtype = FTDate;
1652  dbffieldsize = 8;
1653  dbffielddecs = 0;
1654  }
1655 
1656  /*
1657  * time, timetz, timestamp, or timestamptz field.
1658  */
1659  else if (pgfieldtype == 1083 || pgfieldtype == 1266 || pgfieldtype == 1114 || pgfieldtype == 1184)
1660  {
1661  int secondsize;
1662 
1663  switch (pgtypmod)
1664  {
1665  case -1:
1666  secondsize = 6 + 1;
1667  break;
1668  case 0:
1669  secondsize = 0;
1670  break;
1671  default:
1672  secondsize = pgtypmod + 1;
1673  break;
1674  }
1675 
1676  /* We assume the worst case scenario for all of these:
1677  * date = '5874897-12-31' = 13
1678  * date = '294276-11-20' = 12 (with --enable-integer-datetimes)
1679  * time = '00:00:00' = 8
1680  * zone = '+01:39:52' = 9 (see Europe/Helsinki around 1915)
1681  */
1682 
1683  /* time */
1684  if (pgfieldtype == 1083)
1685  {
1686  dbffieldsize = 8 + secondsize;
1687  }
1688  /* timetz */
1689  else if (pgfieldtype == 1266)
1690  {
1691  dbffieldsize = 8 + secondsize + 9;
1692  }
1693  /* timestamp */
1694  else if (pgfieldtype == 1114)
1695  {
1696  dbffieldsize = 13 + 1 + 8 + secondsize;
1697  }
1698  /* timestamptz */
1699  else if (pgfieldtype == 1184)
1700  {
1701  dbffieldsize = 13 + 1 + 8 + secondsize + 9;
1702  }
1703 
1704  dbffieldtype = FTString;
1705  dbffielddecs = 0;
1706  }
1707 
1708  /*
1709  * uuid type 36 bytes (12345678-9012-3456-7890-123456789012)
1710  */
1711  else if (pgfieldtype == 2950)
1712  {
1713  dbffieldtype = FTString;
1714  dbffieldsize = 36;
1715  dbffielddecs = 0;
1716  }
1717 
1718  /*
1719  * For variable-sized fields we know about, we use
1720  * the maximum allowed size.
1721  * 1042 is bpchar, 1043 is varchar
1722  */
1723  else if ((pgfieldtype == 1042 || pgfieldtype == 1043) && pgtypmod != -1)
1724  {
1725  /*
1726  * mod is maximum allowed size, including
1727  * header which contains *real* size.
1728  */
1729  dbffieldtype = FTString;
1730  dbffieldsize = pgtypmod - 4; /* 4 is header size */
1731  dbffielddecs = 0;
1732  }
1733 
1734  /* For all other valid non-geometry/geography fields... */
1735  else if (dbffieldtype == -1)
1736  {
1737  /*
1738  * For types we don't know anything about, all
1739  * we can do is query the table for the maximum field
1740  * size.
1741  */
1742  dbffieldsize = getMaxFieldSize(state->conn, state->schema, state->table, pgfieldname);
1743  if (dbffieldsize == -1)
1744  {
1745  free(dbffieldname);
1746  return 0;
1747  }
1748 
1749  if (!dbffieldsize)
1750  dbffieldsize = 32;
1751 
1752  /* might 0 be a good size ? */
1753 
1754  dbffieldtype = FTString;
1755  dbffielddecs = 0;
1756 
1757  /* Check to make sure the final field size isn't too large */
1758  if (dbffieldsize > MAX_DBF_FIELD_SIZE)
1759  {
1760  /* Note: we concatenate all warnings from the main loop as this is useful information */
1761  snprintf(buf, 256, _("Warning: values of field '%s' exceeding maximum dbf field width (%d) "
1762  "will be truncated.\n"), dbffieldname, MAX_DBF_FIELD_SIZE);
1763  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message));
1764  dbffieldsize = MAX_DBF_FIELD_SIZE;
1765 
1766  ret = SHPDUMPERWARN;
1767  }
1768  }
1769 
1770  LWDEBUGF(3, "DBF FIELD_NAME: %s, SIZE: %d\n", dbffieldname, dbffieldsize);
1771 
1772  if (dbffieldtype != 9)
1773  {
1774  /* Add the field to the DBF file */
1775  if (DBFAddField(state->dbf, dbffieldname, dbffieldtype, dbffieldsize, dbffielddecs) == -1)
1776  {
1777  snprintf(state->message, SHPDUMPERMSGLEN, _("Error: field %s of type %d could not be created."), dbffieldname, dbffieldtype);
1778 
1779  return SHPDUMPERERR;
1780  }
1781 
1782  /* Add the field information to our field arrays */
1783  state->dbffieldnames[state->fieldcount] = dbffieldname;
1784  state->dbffieldtypes[state->fieldcount] = dbffieldtype;
1785  state->pgfieldnames[state->fieldcount] = pgfieldname;
1786  state->pgfieldlens[state->fieldcount] = pgfieldlen;
1787  state->pgfieldtypmods[state->fieldcount] = pgtypmod;
1788 
1789  state->fieldcount++;
1790  }
1791  }
1792 
1793  /* Now we have generated the field lists, grab some info about the table */
1794  status = getTableInfo(state);
1795  if (status == SHPDUMPERERR)
1796  return SHPDUMPERERR;
1797 
1798  LWDEBUGF(3, "rows: %d\n", state->rowcount);
1799  LWDEBUGF(3, "shptype: %c\n", state->outtype);
1800  LWDEBUGF(3, "shpouttype: %d\n", state->outshptype);
1801 
1802  /* If we didn't find a geometry/geography column... */
1803  if (!state->geo_col_name)
1804  {
1805  if (state->config->geo_col_name)
1806  {
1807  /* A geo* column was specified, but not found */
1808  snprintf(state->message, SHPDUMPERMSGLEN, _("%s: no such attribute in table %s"), state->config->geo_col_name, state->table);
1809 
1810  return SHPDUMPERERR;
1811  }
1812  else
1813  {
1814  /* No geo* column specified so we can only create the DBF section -
1815  but let's issue a warning... */
1816  snprintf(buf, 256, _("No geometry column found.\nThe DBF file will be created but not the shx or shp files.\n"));
1817  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message));
1818 
1819  state->shp = NULL;
1820 
1821  ret = SHPDUMPERWARN;
1822  }
1823  }
1824  else
1825  {
1826  /* Since we have found a geo* column, open the shapefile */
1827  state->shp = SHPCreate(state->shp_file, state->outshptype);
1828  if (!state->shp)
1829  {
1830  snprintf(state->message, SHPDUMPERMSGLEN, _("Could not open shapefile %s!"), state->shp_file);
1831 
1832  return SHPDUMPERERR;
1833  }
1834  }
1835 
1836 
1837  /* Now we have the complete list of fieldnames, let's generate the SQL query. First let's make sure
1838  we reserve enough space for tables with lots of columns */
1839  j = 0;
1840  /*TODO: this really should be rewritten to use stringbuffer */
1841  for (i = 0; i < state->fieldcount; i++)
1842  j += strlen( state->pgfieldnames[i]) + 10; /* extra space for the quotes added by quote_identifier() and any embedded quotes that may need escaping */
1843 
1844  state->main_scan_query = malloc(1024 + j);
1845 
1846  sprintf(state->main_scan_query, "DECLARE cur ");
1847  if (state->config->binary)
1848  strcat(state->main_scan_query, "BINARY ");
1849 
1850  strcat(state->main_scan_query, "CURSOR FOR SELECT ");
1851 
1852  for (i = 0; i < state->fieldcount; i++)
1853  {
1854  /* Comma-separated column names */
1855  if (i > 0)
1856  strcat(state->main_scan_query, ",");
1857 
1858  if (state->config->binary)
1859  sprintf(buf, "%s::text", quote_identifier(state->pgfieldnames[i]) ) ;
1860  else
1861  sprintf(buf, "%s", quote_identifier(state->pgfieldnames[i]) );
1862 
1863  strcat(state->main_scan_query, buf);
1864  }
1865 
1866  /* If we found a valid geometry/geography column then use it */
1867  if (state->geo_col_name)
1868  {
1869  /* If this is the (only) column, no need for the initial comma */
1870  if (state->fieldcount > 0)
1871  strcat(state->main_scan_query, ",");
1872 
1873  if (state->big_endian)
1874  {
1875  if (state->pgis_major_version > 0)
1876  {
1877  sprintf(buf, "ST_asEWKB(ST_SetSRID(%s::geometry, 0), 'XDR') AS _geoX", quote_identifier(state->geo_col_name) );
1878  }
1879  else
1880  {
1881  sprintf(buf, "asbinary(%s::geometry, 'XDR') AS _geoX",
1882  quote_identifier(state->geo_col_name) );
1883  }
1884  }
1885  else /* little_endian */
1886  {
1887  if (state->pgis_major_version > 0)
1888  {
1889  sprintf(buf, "ST_AsEWKB(ST_SetSRID(%s::geometry, 0), 'NDR') AS _geoX", quote_identifier(state->geo_col_name) ) ;
1890  }
1891  else
1892  {
1893  sprintf(buf, "asbinary(%s::geometry, 'NDR') AS _geoX",
1894  quote_identifier(state->geo_col_name) );
1895  }
1896  }
1897 
1898  strcat(state->main_scan_query, buf);
1899  }
1900 
1901  if (state->schema)
1902  {
1903  sprintf(buf, " FROM \"%s\".\"%s\"", state->schema, state->table);
1904  }
1905  else
1906  {
1907  sprintf(buf, " FROM \"%s\"", state->table);
1908  }
1909 
1910  strcat(state->main_scan_query, buf);
1911 
1912  /* Order by 'gid' (if found) */
1913  if (gidfound)
1914  {
1915  sprintf(buf, " ORDER BY \"gid\"");
1916  strcat(state->main_scan_query, buf);
1917  }
1918 
1919  /* Now we've finished with the result set, we can dispose of it */
1920  PQclear(res);
1921 
1922  LWDEBUGF(3, "FINAL QUERY: %s\n", state->main_scan_query);
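 /*
  * Illustration (an assumed example, not part of the original source):
  * for a hypothetical table public.roads(gid, name, geom) dumped with
  * config->binary unset, on little-endian hardware with PostGIS >= 1.0,
  * the query logged above would read:
  *
  *   DECLARE cur CURSOR FOR SELECT "gid","name",
  *     ST_AsEWKB(ST_SetSRID("geom"::geometry, 0), 'NDR') AS _geoX
  *     FROM "public"."roads" ORDER BY "gid"
  */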
1923 
1924  /*
1925  * Begin the transaction
1926  * (a cursor can only be defined inside a transaction block)
1927  */
1928  res = PQexec(state->conn, "BEGIN");
1929  if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
1930  {
1931  snprintf(state->message, SHPDUMPERMSGLEN, _("Error starting transaction: %s"), PQresultErrorMessage(res));
1932  PQclear(res);
1933  return SHPDUMPERERR;
1934  }
1935 
1936  PQclear(res);
1937 
1938  /* Execute the main scan query */
1939  res = PQexec(state->conn, state->main_scan_query);
1940  if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
1941  {
1942  snprintf(state->message, SHPDUMPERMSGLEN, _("Error executing main scan query: %s"), PQresultErrorMessage(res));
1943  PQclear(res);
1944  return SHPDUMPERERR;
1945  }
1946 
1947  PQclear(res);
1948 
1949  /* Setup initial scan state */
1950  state->currow = 0;
1951  state->curresrow = 0;
1952  state->currescount = 0;
1953  state->fetchres = NULL;
1954 
1955  /* Generate the fetch query */
1956  state->fetch_query = malloc(256);
1957  sprintf(state->fetch_query, "FETCH %d FROM cur", state->config->fetchsize);
1958 
1959  return SHPDUMPEROK;
1960 }
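
On success the table is left ready for scanning: the transaction is open, the cursor cur is declared, and state->fetch_query holds "FETCH <fetchsize> FROM cur". The sketch below (plain libpq, not code from pgsql2shp-core.c) shows how such a cursor can be drained; only state->conn, state->fetch_query and state->pgfieldnames are taken from this page, everything else is an assumption for illustration.

#include <stdio.h>
#include <libpq-fe.h>
#include "pgsql2shp-core.h"   /* assumed header, as in the sketch above */

/* Hypothetical illustration only: drain the cursor prepared by
 * ShpDumperOpenTable(). The real per-row conversion lives elsewhere
 * in pgsql2shp-core.c and is elided here. */
static void drain_cursor(SHPDUMPERSTATE *state)
{
    int i;

    for (;;)
    {
        PGresult *res = PQexec(state->conn, state->fetch_query);

        if (PQresultStatus(res) != PGRES_TUPLES_OK)
        {
            fprintf(stderr, "FETCH failed: %s", PQresultErrorMessage(res));
            PQclear(res);
            return;
        }

        if (PQntuples(res) == 0)
        {
            /* Cursor exhausted: every row has been fetched. */
            PQclear(res);
            return;
        }

        for (i = 0; i < PQntuples(res); i++)
        {
            /* Column j holds state->pgfieldnames[j]; if a geometry
             * column was found it is returned last as _geoX.
             * Convert and write the row to the .dbf/.shp here. */
        }

        PQclear(res);
    }
}

Fetching config->fetchsize rows at a time through the cursor keeps memory usage bounded for large tables, which is why the function declares a cursor inside a transaction instead of running the SELECT directly.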

Here is the call graph for this function:

Here is the caller graph for this function: