PostGIS  2.2.8dev-r@@SVN_REVISION@@

◆ ShpDumperOpenTable()

int ShpDumperOpenTable ( SHPDUMPERSTATE * state )

Definition at line 1331 of file pgsql2shp-core.c.

References _, shp_dumper_state::big_endian, shp_dumper_config::binary, colmap_dbf_by_pg(), colmap_read(), shp_dumper_state::column_map, shp_dumper_config::column_map_filename, shp_dumper_state::config, shp_dumper_state::conn, shp_dumper_state::currescount, shp_dumper_state::curresrow, shp_dumper_state::currow, shp_dumper_state::dbf, DBFAddField(), DBFCreateEx(), shp_dumper_state::dbffieldnames, shp_dumper_state::dbffieldtypes, encoding2codepage(), shp_dumper_state::fetch_query, shp_dumper_state::fetchres, shp_dumper_config::fetchsize, shp_dumper_state::fieldcount, free(), shp_dumper_config::geo_col_name, shp_dumper_state::geo_col_name, shp_dumper_state::geog_oid, shp_dumper_state::geom_oid, getMaxFieldSize(), getTableInfo(), shp_dumper_config::includegid, shp_dumper_config::keep_fieldname_case, LWDEBUGF, shp_dumper_state::main_scan_query, malloc(), MAX_DBF_FIELD_SIZE, shp_dumper_state::message, shp_dumper_state::outshptype, shp_dumper_state::outtype, shp_dumper_state::pgfieldlens, shp_dumper_state::pgfieldnames, shp_dumper_state::pgfieldtypmods, shp_dumper_state::pgis_major_version, shp_dumper_state::rowcount, shp_dumper_config::schema, shp_dumper_state::schema, shp_dumper_state::shp, shp_dumper_config::shp_file, shp_dumper_state::shp_file, SHPCreate(), SHPDUMPERERR, SHPDUMPERMSGLEN, SHPDUMPEROK, SHPDUMPERWARN, shp_dumper_config::table, shp_dumper_state::table, shp_dumper_config::unescapedattrs, and shp_dumper_config::usrquery.

Referenced by main(), and pgui_action_export().
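
The function is driven by the command-line dumper (main()) and by the GUI exporter (pgui_action_export()). Below is a minimal sketch of such a caller; the dump_table() wrapper and the ShpDumperCreate(), ShpDumperConnectDatabase(), ShpDumperCloseTable() and ShpDumperDestroy() helpers are assumptions for illustration only and should be checked against the declarations in pgsql2shp-core.h.

#include <stdio.h>
#include "pgsql2shp-core.h"

/* Hypothetical wrapper showing the usual call order around ShpDumperOpenTable().
 * The ShpDumper* helpers used here are assumed from pgsql2shp-core.h. */
static int dump_table(SHPDUMPERCONFIG *config)
{
	SHPDUMPERSTATE *state = ShpDumperCreate(config);      /* allocate dumper state (assumed API) */
	int ret;

	if (ShpDumperConnectDatabase(state) == SHPDUMPERERR)  /* connect using the config (assumed API) */
	{
		fprintf(stderr, "%s\n", state->message);
		return 1;
	}

	ret = ShpDumperOpenTable(state);  /* creates the .dbf (and .shp), builds the cursor query */
	if (ret == SHPDUMPERERR)
	{
		fprintf(stderr, "%s\n", state->message);
		return 1;
	}
	if (ret == SHPDUMPERWARN)
		fprintf(stderr, "%s", state->message);  /* warnings are concatenated into state->message */

	/* ... fetch the rows and write each record, then ... */

	ShpDumperCloseTable(state);  /* cleanup helpers (assumed API) */
	ShpDumperDestroy(state);
	return 0;
}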

1332 {
1333  PGresult *res;
1334 
1335  char buf[256];
1336  char *query;
1337  int gidfound = 0, i, j, ret, status;
1338 
1339 
1340  /* Open the column map if one was specified */
1341  if (state->config->column_map_filename)
1342  {
1343  ret = colmap_read(state->config->column_map_filename,
1344  &state->column_map, state->message, SHPDUMPERMSGLEN);
1345  if (!ret) return SHPDUMPERERR;
1346  }
1347 
1348  /* If a user-defined query has been specified, create and point the state to our new table */
1349  if (state->config->usrquery)
1350  {
1351  state->table = malloc(20 + 20); /* string + max long precision */
1352  sprintf(state->table, "__pgsql2shp%lu_tmp_table", (long)getpid());
1353 
1354  query = malloc(32 + strlen(state->table) + strlen(state->config->usrquery));
1355 
1356  sprintf(query, "CREATE TEMP TABLE \"%s\" AS %s", state->table, state->config->usrquery);
1357  res = PQexec(state->conn, query);
1358  free(query);
1359 
1360  /* Execute the code to create the table */
1361  if (PQresultStatus(res) != PGRES_COMMAND_OK)
1362  {
1363  snprintf(state->message, SHPDUMPERMSGLEN, _("Error executing user query: %s"), PQresultErrorMessage(res));
1364  PQclear(res);
1365  return SHPDUMPERERR;
1366  }
1367  }
1368  else
1369  {
1370  /* Simply point the state to copies of the supplied schema and table */
1371  state->table = strdup(state->config->table);
1372  if (state->config->schema)
1373  state->schema = strdup(state->config->schema);
1374  }
1375 
1376 
1377  /* Get the list of columns and their types for the selected table */
1378  if (state->schema)
1379  {
1380  query = malloc(250 + strlen(state->schema) + strlen(state->table));
1381 
1382  sprintf(query, "SELECT a.attname, a.atttypid, "
1383  "a.atttypmod, a.attlen FROM "
1384  "pg_attribute a, pg_class c, pg_namespace n WHERE "
1385  "n.nspname = '%s' AND a.attrelid = c.oid AND "
1386  "n.oid = c.relnamespace AND "
1387  "a.atttypid != 0 AND "
1388  "a.attnum > 0 AND c.relname = '%s'", state->schema, state->table);
1389  }
1390  else
1391  {
1392  query = malloc(250 + strlen(state->table));
1393 
1394  sprintf(query, "SELECT a.attname, a.atttypid, "
1395  "a.atttypmod, a.attlen FROM "
1396  "pg_attribute a, pg_class c WHERE "
1397  "a.attrelid = c.oid and a.attnum > 0 AND "
1398  "a.atttypid != 0 AND "
1399  "c.relname = '%s' AND "
1400  "pg_catalog.pg_table_is_visible(c.oid)", state->table);
1401  }
1402 
1403  LWDEBUGF(3, "query is: %s\n", query);
1404 
1405  res = PQexec(state->conn, query);
1406  free(query);
1407 
1408  if (PQresultStatus(res) != PGRES_TUPLES_OK)
1409  {
1410  snprintf(state->message, SHPDUMPERMSGLEN, _("Error querying for attributes: %s"), PQresultErrorMessage(res));
1411  PQclear(res);
1412  return SHPDUMPERERR;
1413  }
1414 
1415  if (!PQntuples(res))
1416  {
1417  snprintf(state->message, SHPDUMPERMSGLEN, _("Table %s does not exist"), state->table);
1418  PQclear(res);
1419  return SHPDUMPERERR;
1420  }
1421 
1422  /* If a shapefile name was specified, use it. Otherwise simply use the table name. */
1423  if (state->config->shp_file != NULL)
1424  state->shp_file = state->config->shp_file;
1425  else
1426  state->shp_file = state->table;
1427 
1428  /* Create the dbf file: */
1429  /* If there's a user-specified encoding hanging around, try and use that. */
1430  /* Otherwise, just use UTF-8 encoding, since that's usually our client encoding. */
1431  if ( getenv("PGCLIENTENCODING") )
1432  {
1433  char *codepage = encoding2codepage(getenv("PGCLIENTENCODING"));
1434  state->dbf = DBFCreateEx(state->shp_file, codepage);
1435  }
1436  else
1437  {
1438  state->dbf = DBFCreateEx(state->shp_file, "UTF-8");
1439  }
1440 
1441  if (!state->dbf)
1442  {
1443  snprintf(state->message, SHPDUMPERMSGLEN, _("Could not create dbf file %s"), state->shp_file);
1444  return SHPDUMPERERR;
1445  }
1446 
1447  /*
1448  * Scan the result, setting the fields to be returned in the main scan
1449  * query, filling the type arrays, and creating the .dbf and .shp files.
1450  */
1451  state->dbffieldnames = malloc(sizeof(char *) * PQntuples(res));
1452  state->dbffieldtypes = malloc(sizeof(int) * PQntuples(res));
1453  state->pgfieldnames = malloc(sizeof(char *) * PQntuples(res));
1454  state->pgfieldlens = malloc(sizeof(int) * PQntuples(res));
1455  state->pgfieldtypmods = malloc(sizeof(int) * PQntuples(res));
1456  state->fieldcount = 0;
1457  int tmpint = 1;
1458 
1459  for (i = 0; i < PQntuples(res); i++)
1460  {
1461  char *ptr;
1462 
1463  int pgfieldtype, pgtypmod, pgfieldlen;
1464  char *pgfieldname;
1465 
1466  int dbffieldtype, dbffieldsize, dbffielddecs;
1467  char *dbffieldname;
1468 
1469  pgfieldname = PQgetvalue(res, i, 0);
1470  pgfieldtype = atoi(PQgetvalue(res, i, 1));
1471  pgtypmod = atoi(PQgetvalue(res, i, 2));
1472  pgfieldlen = atoi(PQgetvalue(res, i, 3));
1473  dbffieldtype = -1;
1474  dbffieldsize = 0;
1475  dbffielddecs = 0;
1476 
1477  /*
1478  * This is a geometry/geography column
1479  */
1480  if (pgfieldtype == state->geom_oid || pgfieldtype == state->geog_oid)
1481  {
1482  /* If no geometry/geography column has been found yet... */
1483  if (!state->geo_col_name)
1484  {
1485  /* If either no geo* column name was provided (in which case this is
1486  the first match) or we match the provided column name, we have
1487  found our geo* column */
1488  if (!state->config->geo_col_name || !strcmp(state->config->geo_col_name, pgfieldname))
1489  {
1490  dbffieldtype = 9;
1491 
1492  state->geo_col_name = strdup(pgfieldname);
1493  }
1494  }
1495  }
1496 
1497  /*
1498  * Everything else (non geometries) will be
1499  * a DBF attribute.
1500  */
1501 
1502  /* Skip gid (if not asked to do otherwise) */
1503  if (!strcmp(pgfieldname, "gid") )
1504  {
1505  gidfound = 1;
1506 
1507  if (!state->config->includegid)
1508  continue;
1509  }
1510 
1511  /* Unescape any reserved column names */
1512  ptr = pgfieldname;
1513  if (!state->config->unescapedattrs)
1514  {
1515  if (*ptr == '_')
1516  ptr += 2;
1517  }
1518 
1519  /*
1520  * This needs special handling since both xmin and _xmin
1521  * become __xmin when escaped
1522  */
1523 
1524  /* Limit dbf field names to 10 characters */
1525  dbffieldname = malloc(11);
1526  strncpy(dbffieldname, ptr, 10);
1527  dbffieldname[10] = '\0';
1528 
1529  /* If a column map file has been passed in,
1530  * use this to create the dbf field name from
1531  * the PostgreSQL column name */
1532  {
1533  const char *mapped = colmap_dbf_by_pg(&state->column_map, dbffieldname);
1534  if (mapped)
1535  {
1536  strncpy(dbffieldname, mapped, 10);
1537  dbffieldname[10] = '\0';
1538  }
1539  }
1540 
1541  /*
1542  * make sure the fields all have unique names
1543  */
1544  tmpint = 1;
1545  for (j = 0; j < state->fieldcount; j++)
1546  {
1547  if (!strncasecmp(dbffieldname, state->dbffieldnames[j], 10))
1548  {
1549  sprintf(dbffieldname, "%.7s_%.2d", ptr, tmpint++);
1550  continue;
1551  }
1552  }
1553 
1554  /* make UPPERCASE if keep_fieldname_case = 0 */
1555  if (!state->config->keep_fieldname_case)
1556  for (j = 0; j < strlen(dbffieldname); j++)
1557  dbffieldname[j] = toupper(dbffieldname[j]);
1558 
1559  /* Issue warning if column has been renamed */
1560  if (strcasecmp(dbffieldname, pgfieldname))
1561  {
1562  /* Note: we concatenate all warnings from the main loop as this is useful information */
1563  snprintf(buf, 256, _("Warning, field %s renamed to %s\n"), pgfieldname, dbffieldname);
1564  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message));
1565 
1566  ret = SHPDUMPERWARN;
1567  }
1568 
1569 
1570  /*
1571  * Find appropriate type of dbf attributes
1572  */
1573 
1574  /* int2 type */
1575  if (pgfieldtype == 21)
1576  {
1577  /*
1578  * Longest text representation for
1579  * an int2 type (16bit) is 6 bytes
1580  * (-32768)
1581  */
1582  dbffieldtype = FTInteger;
1583  dbffieldsize = 6;
1584  dbffielddecs = 0;
1585  }
1586 
1587  /* int4 type */
1588  else if (pgfieldtype == 23)
1589  {
1590  /*
1591  * Longest text representation for
1592  * an int4 type (32bit) is 11 bytes
1593  * (-2147483648)
1594  */
1595  dbffieldtype = FTInteger;
1596  dbffieldsize = 11;
1597  dbffielddecs = 0;
1598  }
1599 
1600  /* int8 type */
1601  else if (pgfieldtype == 20)
1602  {
1603  /*
1604  * Longest text representation for
1605  * an int8 type (64bit) is 20 bytes
1606  * (-9223372036854775808)
1607  */
1608  dbffieldtype = FTInteger;
1609  dbffieldsize = 19;
1610  dbffielddecs = 0;
1611  }
1612 
1613  /*
1614  * double or numeric types:
1615  * 700: float4
1616  * 701: float8
1617  * 1700: numeric
1618  *
1619  *
1620  * TODO: stricter handling of sizes
1621  */
1622  else if (pgfieldtype == 700 || pgfieldtype == 701 || pgfieldtype == 1700)
1623  {
1624  dbffieldtype = FTDouble;
1625  dbffieldsize = 32;
1626  dbffielddecs = 10;
1627  }
1628 
1629  /*
1630  * Boolean field, we use FTLogical
1631  */
1632  else if (pgfieldtype == 16)
1633  {
1634  dbffieldtype = FTLogical;
1635  dbffieldsize = 2;
1636  dbffielddecs = 0;
1637  }
1638 
1639  /*
1640  * Date field
1641  */
1642  else if (pgfieldtype == 1082)
1643  {
1644  dbffieldtype = FTDate;
1645  dbffieldsize = 8;
1646  dbffielddecs = 0;
1647  }
1648 
1649  /*
1650  * time, timetz, timestamp, or timestamptz field.
1651  */
1652  else if (pgfieldtype == 1083 || pgfieldtype == 1266 || pgfieldtype == 1114 || pgfieldtype == 1184)
1653  {
1654  int secondsize;
1655 
1656  switch (pgtypmod)
1657  {
1658  case -1:
1659  secondsize = 6 + 1;
1660  break;
1661  case 0:
1662  secondsize = 0;
1663  break;
1664  default:
1665  secondsize = pgtypmod + 1;
1666  break;
1667  }
1668 
1669  /* We assume the worst case scenario for all of these:
1670  * date = '5874897-12-31' = 13
1671  * date = '294276-11-20' = 12 (with --enable-integer-datetimes)
1672  * time = '00:00:00' = 8
1673  * zone = '+01:39:52' = 9 (see Europe/Helsinki around 1915)
1674  */
1675 
1676  /* time */
1677  if (pgfieldtype == 1083)
1678  {
1679  dbffieldsize = 8 + secondsize;
1680  }
1681  /* timetz */
1682  else if (pgfieldtype == 1266)
1683  {
1684  dbffieldsize = 8 + secondsize + 9;
1685  }
1686  /* timestamp */
1687  else if (pgfieldtype == 1114)
1688  {
1689  dbffieldsize = 13 + 1 + 8 + secondsize;
1690  }
1691  /* timestamptz */
1692  else if (pgfieldtype == 1184)
1693  {
1694  dbffieldsize = 13 + 1 + 8 + secondsize + 9;
1695  }
1696 
1697  dbffieldtype = FTString;
1698  dbffielddecs = 0;
1699  }
1700 
1701  /*
1702  * uuid type 36 bytes (12345678-9012-3456-7890-123456789012)
1703  */
1704  else if (pgfieldtype == 2950)
1705  {
1706  dbffieldtype = FTString;
1707  dbffieldsize = 36;
1708  dbffielddecs = 0;
1709  }
1710 
1711  /*
1712  * For variable-sized fields we know about, we use
1713  * the maximum allowed size.
1714  * 1042 is bpchar, 1043 is varchar
1715  */
1716  else if ((pgfieldtype == 1042 || pgfieldtype == 1043) && pgtypmod != -1)
1717  {
1718  /*
1719  * mod is maximum allowed size, including
1720  * header which contains *real* size.
1721  */
1722  dbffieldtype = FTString;
1723  dbffieldsize = pgtypmod - 4; /* 4 is header size */
1724  dbffielddecs = 0;
1725  }
1726 
1727  /* For all other valid non-geometry/geography fields... */
1728  else if (dbffieldtype == -1)
1729  {
1730  /*
1731  * For types we don't know anything about, all
1732  * we can do is query the table for the maximum field
1733  * size.
1734  */
1735  dbffieldsize = getMaxFieldSize(state->conn, state->schema, state->table, pgfieldname);
1736  if (dbffieldsize == -1)
1737  {
1738  free(dbffieldname);
1739  return 0;
1740  }
1741 
1742  if (!dbffieldsize)
1743  dbffieldsize = 32;
1744 
1745  /* might 0 be a good size ? */
1746 
1747  dbffieldtype = FTString;
1748  dbffielddecs = 0;
1749 
1750  /* Check to make sure the final field size isn't too large */
1751  if (dbffieldsize > MAX_DBF_FIELD_SIZE)
1752  {
1753  /* Note: we concatenate all warnings from the main loop as this is useful information */
1754  snprintf(buf, 256, _("Warning: values of field '%s' exceeding maximum dbf field width (%d) "
1755  "will be truncated.\n"), dbffieldname, MAX_DBF_FIELD_SIZE);
1756  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message));
1757  dbffieldsize = MAX_DBF_FIELD_SIZE;
1758 
1759  ret = SHPDUMPERWARN;
1760  }
1761  }
1762 
1763  LWDEBUGF(3, "DBF FIELD_NAME: %s, SIZE: %d\n", dbffieldname, dbffieldsize);
1764 
1765  if (dbffieldtype != 9)
1766  {
1767  /* Add the field to the DBF file */
1768  if (DBFAddField(state->dbf, dbffieldname, dbffieldtype, dbffieldsize, dbffielddecs) == -1)
1769  {
1770  snprintf(state->message, SHPDUMPERMSGLEN, _("Error: field %s of type %d could not be created."), dbffieldname, dbffieldtype);
1771 
1772  return SHPDUMPERERR;
1773  }
1774 
1775  /* Add the field information to our field arrays */
1776  state->dbffieldnames[state->fieldcount] = dbffieldname;
1777  state->dbffieldtypes[state->fieldcount] = dbffieldtype;
1778  state->pgfieldnames[state->fieldcount] = pgfieldname;
1779  state->pgfieldlens[state->fieldcount] = pgfieldlen;
1780  state->pgfieldtypmods[state->fieldcount] = pgtypmod;
1781 
1782  state->fieldcount++;
1783  }
1784  }
1785 
1786  /* Now that we have generated the field lists, grab some info about the table */
1787  status = getTableInfo(state);
1788  if (status == SHPDUMPERERR)
1789  return SHPDUMPERERR;
1790 
1791  LWDEBUGF(3, "rows: %d\n", state->rowcount);
1792  LWDEBUGF(3, "shptype: %c\n", state->outtype);
1793  LWDEBUGF(3, "shpouttype: %d\n", state->outshptype);
1794 
1795  /* If we didn't find a geometry/geography column... */
1796  if (!state->geo_col_name)
1797  {
1798  if (state->config->geo_col_name)
1799  {
1800  /* A geo* column was specified, but not found */
1801  snprintf(state->message, SHPDUMPERMSGLEN, _("%s: no such attribute in table %s"), state->config->geo_col_name, state->table);
1802 
1803  return SHPDUMPERERR;
1804  }
1805  else
1806  {
1807  /* No geo* column specified so we can only create the DBF section -
1808  but let's issue a warning... */
1809  snprintf(buf, 256, _("No geometry column found.\nThe DBF file will be created but not the shx or shp files.\n"));
1810  strncat(state->message, buf, SHPDUMPERMSGLEN - strlen(state->message));
1811 
1812  state->shp = NULL;
1813 
1814  ret = SHPDUMPERWARN;
1815  }
1816  }
1817  else
1818  {
1819  /* Since we have found a geo* column, open the shapefile */
1820  state->shp = SHPCreate(state->shp_file, state->outshptype);
1821  if (!state->shp)
1822  {
1823  snprintf(state->message, SHPDUMPERMSGLEN, _("Could not open shapefile %s!"), state->shp_file);
1824 
1825  return SHPDUMPERERR;
1826  }
1827  }
1828 
1829 
1830  /* Now that we have the complete list of field names, let's generate the SQL query. First make sure
1831  we reserve enough space for tables with lots of columns */
1832  j = 0;
1833  for (i = 0; i < state->fieldcount; i++)
1834  j += strlen(state->pgfieldnames[i]) + 2; /* Add 2 for leading and trailing quotes */
1835 
1836  state->main_scan_query = malloc(1024 + j);
1837 
1838  sprintf(state->main_scan_query, "DECLARE cur ");
1839  if (state->config->binary)
1840  strcat(state->main_scan_query, "BINARY ");
1841 
1842  strcat(state->main_scan_query, "CURSOR FOR SELECT ");
1843 
1844  for (i = 0; i < state->fieldcount; i++)
1845  {
1846  /* Comma-separated column names */
1847  if (i > 0)
1848  strcat(state->main_scan_query, ",");
1849 
1850  if (state->config->binary)
1851  sprintf(buf, "\"%s\"::text", state->pgfieldnames[i]);
1852  else
1853  sprintf(buf, "\"%s\"", state->pgfieldnames[i]);
1854 
1855  strcat(state->main_scan_query, buf);
1856  }
1857 
1858  /* If we found a valid geometry/geography column then use it */
1859  if (state->geo_col_name)
1860  {
1861  /* If this is the (only) column, no need for the initial comma */
1862  if (state->fieldcount > 0)
1863  strcat(state->main_scan_query, ",");
1864 
1865  if (state->big_endian)
1866  {
1867  if (state->pgis_major_version > 0)
1868  {
1869  sprintf(buf, "ST_asEWKB(ST_SetSRID(\"%s\"::geometry, 0), 'XDR') AS _geoX", state->geo_col_name);
1870  }
1871  else
1872  {
1873  sprintf(buf, "asbinary(\"%s\"::geometry, 'XDR') AS _geoX",
1874  state->geo_col_name);
1875  }
1876  }
1877  else /* little_endian */
1878  {
1879  if (state->pgis_major_version > 0)
1880  {
1881  sprintf(buf, "ST_AsEWKB(ST_SetSRID(\"%s\"::geometry, 0), 'NDR') AS _geoX", state->geo_col_name);
1882  }
1883  else
1884  {
1885  sprintf(buf, "asbinary(\"%s\"::geometry, 'NDR') AS _geoX",
1886  state->geo_col_name);
1887  }
1888  }
1889 
1890  strcat(state->main_scan_query, buf);
1891  }
1892 
1893  if (state->schema)
1894  {
1895  sprintf(buf, " FROM \"%s\".\"%s\"", state->schema, state->table);
1896  }
1897  else
1898  {
1899  sprintf(buf, " FROM \"%s\"", state->table);
1900  }
1901 
1902  strcat(state->main_scan_query, buf);
1903 
1904  /* Order by 'gid' (if found) */
1905  if (gidfound)
1906  {
1907  sprintf(buf, " ORDER BY \"gid\"");
1908  strcat(state->main_scan_query, buf);
1909  }
1910 
1911  /* Now that we've finished with the result set, we can dispose of it */
1912  PQclear(res);
1913 
1914  LWDEBUGF(3, "FINAL QUERY: %s\n", state->main_scan_query);
1915 
1916  /*
1917  * Begin the transaction
1918  * (a cursor can only be defined inside a transaction block)
1919  */
1920  res = PQexec(state->conn, "BEGIN");
1921  if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
1922  {
1923  snprintf(state->message, SHPDUMPERMSGLEN, _("Error starting transaction: %s"), PQresultErrorMessage(res));
1924  PQclear(res);
1925  return SHPDUMPERERR;
1926  }
1927 
1928  PQclear(res);
1929 
1930  /* Execute the main scan query */
1931  res = PQexec(state->conn, state->main_scan_query);
1932  if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
1933  {
1934  snprintf(state->message, SHPDUMPERMSGLEN, _("Error executing main scan query: %s"), PQresultErrorMessage(res));
1935  PQclear(res);
1936  return SHPDUMPERERR;
1937  }
1938 
1939  PQclear(res);
1940 
1941  /* Setup initial scan state */
1942  state->currow = 0;
1943  state->curresrow = 0;
1944  state->currescount = 0;
1945  state->fetchres = NULL;
1946 
1947  /* Generate the fetch query */
1948  state->fetch_query = malloc(256);
1949  sprintf(state->fetch_query, "FETCH %d FROM cur", state->config->fetchsize);
1950 
1951  return SHPDUMPEROK;
1952 }
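
For orientation, the main scan query assembled at the end of this routine takes the following shape for a hypothetical table public.roads with columns gid, name and a geometry column geom (gid included, non-binary cursor, PostGIS >= 1.0, little-endian host); the table and column names are invented for illustration:

DECLARE cur CURSOR FOR SELECT "gid","name",ST_AsEWKB(ST_SetSRID("geom"::geometry, 0), 'NDR') AS _geoX FROM "public"."roads" ORDER BY "gid"

Rows are then pulled in batches with the generated fetch query (FETCH <fetchsize> FROM cur) inside the transaction that this function opens.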
Here is the call graph for this function:
Here is the caller graph for this function: