import json

# Print the name and phone number of every entry in addressbook.json.
# The file is expected to hold a JSON array of objects with 'name' and
# 'phone' keys. Fixed: Python 2 `print` statements (a syntax error on
# Python 3) and a file handle that was never closed — use a context
# manager and print() calls instead.
with open('addressbook.json') as f:
    for addressbook in json.load(f):
        print(addressbook['name'])
        print(addressbook['phone'])
References:
http://stackoverflow.com/questions/2835559/parsing-values-from-a-json-file-in-python
import json

# Duplicate of the snippet above: print each contact's name and phone
# from addressbook.json. Fixed the Python 2 `print` statements and the
# leaked file handle by using print() inside a `with` block.
with open('addressbook.json') as f:
    for addressbook in json.load(f):
        print(addressbook['name'])
        print(addressbook['phone'])
# apt-get install libmysqlclient-dev
# GitHub OAuth2 endpoints for rauth's OAuth2Service. The client
# credentials come from the module-level CLIENT_ID / CLIENT_SECRET.
github = OAuth2Service(
    name='github',
    client_id=CLIENT_ID,
    client_secret=CLIENT_SECRET,
    authorize_url='https://github.com/login/oauth/authorize',
    access_token_url='https://github.com/login/oauth/access_token',
    base_url='https://api.github.com/',
)
# Facebook OAuth2 endpoints for rauth's OAuth2Service; the client
# credentials are read from the [FACEBOOK] section of `config`.
_facebook_settings = {
    'name': 'facebook',
    'authorize_url': 'https://graph.facebook.com/oauth/authorize',
    'access_token_url': 'https://graph.facebook.com/oauth/access_token',
    'client_id': config.get('FACEBOOK', 'client_id'),
    'client_secret': config.get('FACEBOOK', 'client_secret'),
    'base_url': 'https://graph.facebook.com/',
}
facebook = OAuth2Service(**_facebook_settings)
# Google OAuth2 endpoints for rauth's OAuth2Service. Credentials come
# from the module-level CLIENT_ID / CLIENT_SECRET.
google = OAuth2Service(
    name='google',
    client_id=CLIENT_ID,
    client_secret=CLIENT_SECRET,
    authorize_url='https://accounts.google.com/o/oauth2/auth',
    access_token_url='https://accounts.google.com/o/oauth2/token',
    base_url='https://www.googleapis.com/oauth2/v1/',
)
# Exchange the authorization data for an authenticated session; the
# provider returns the token response as JSON, hence decoder=json.loads.
service.get_auth_session(data=data, decoder=json.loads)
logfile-maxbytes = 20MB
; logfile-backups is the number of rotated log files to keep; setting it
; to 0 keeps only the current file and deletes the rest.
logfile-backups = 5
; stdout and stderr rotation can also be configured independently:
stdout_logfile_maxbytes
stdout_logfile_backups
stderr_logfile_maxbytes
stderr_logfile_backups
# apt-get install snmptrapd
# vi /etc/default/snmptrapd
TRAPDRUN=yes
# By default traps are logged to syslog; pass -Lf to log to a file instead.
TRAPDOPTS='-Lsd -p /run/snmptrapd.pid -Lf /var/log/snmptrapd.log'
# vi /etc/snmp/snmptrapd.conf
format1 "%a %N %T"
authCommunity log public
authCommunity execute public
traphandle default /usr/bin/php /tmp/snmptrap.php
disableAuthorization yes
$ snmptrap -v 1 -c public 127.0.0.1 '' 127.0.0.1 6 1 '1111'
$ cd $OPENSHIFT_DATA_DIR
Calling db.create_all() directly fails (no application context); use the following instead:
# db.create_all() needs an active Flask application context, so push one
# explicitly before creating the tables.
with app.app_context():
    db.create_all()
import os
# OpenShift injects the PostgreSQL connection string through this
# environment variable; raises KeyError when run outside OpenShift.
SQLALCHEMY_DATABASE_URI = os.environ['OPENSHIFT_POSTGRESQL_DB_URL']
import os
# Same idea for MySQL: OpenShift provides the connection string via this
# environment variable; raises KeyError when it is not set.
SQLALCHEMY_DATABASE_URI = os.environ['OPENSHIFT_MYSQL_DB_URL']
# BeautifulSoup examples: select all anchors nested under the first <li>,
# then iterate every anchor in the document and print its href attribute.
# Fixed: the Python 2 `print` statement (syntax error on Python 3).
soup.find('li').findAll('a')
for link in soup.findAll('a'):
    print(link['href'])
# supervisorctl -c <(echo "[supervisorctl]") status
# docker ps
# docker inspect container_name | grep IPAddress
# iptables -t nat -A DOCKER -p tcp --dport 8002 -j DNAT --to-destination 172.17.0.19:8000
# pip install uWSGI
--with-mysql-dir=/usr/
# Run foo in a separate process and hard-kill it after a 10-second budget.
# Reconstructed from a snippet that had been collapsed onto a single line,
# which left all the code after the first '#' inside a comment (dead text).
p = multiprocessing.Process(target=foo, name="Foo", args=(10,))
p.start()
# Give foo up to 10 seconds of wall-clock time.
time.sleep(10)
# Terminate foo if it is still running, then reap the child process.
p.terminate()
p.join()
References:
python - Stop code after time period - Stack Overflow
// jQuery UI drag-and-drop: make elements matching .selector draggable,
// and make them valid drop targets for draggable elements.
$( ".selector" ).draggable();
$( ".selector" ).droppable();
mysql> ALTER TABLE tbl PARTITION BY KEY(col1) PARTITIONS 5;
mysql> ALTER TABLE tbl partition by range(`day`) (
partition p_2012 values less than (20130000),
partition p_2013 values less than (20140000)
);
mysql> ALTER TABLE tbl DROP PARTITION p0;
mysql> ALTER TABLE tbl REBUILD PARTITION p0, p1;
mysql> ALTER TABLE tbl ANALYZE PARTITION p0, p1;
mysql> ALTER TABLE tbl OPTIMIZE PARTITION p0, p1;
mysql> ALTER TABLE tbl REPAIR PARTITION p0, p1;mysql> ALTER TABLE tbl CHECK PARTITION p0, p1;
SELECT TABLE_NAME, PARTITION_NAME FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_SCHEMA = 'db_name' ;
SHOW CREATE TABLE tbl_name;
SELECT PARTITION_ORDINAL_POSITION, TABLE_ROWS, PARTITION_METHOD
FROM information_schema.PARTITIONS
WHERE TABLE_SCHEMA = 'db_name' AND TABLE_NAME = 'tbl_name';